| code (string, 82 – 53.2k chars) | code_codestyle (int64, 0 – 721) | style_context (string, 91 – 41.9k chars) | style_context_codestyle (int64, 0 – 699) | label (int64, 0 – 1) |
|---|---|---|---|---|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
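A minimal usage sketch for the processor above, assuming the class name as restored here; the random PIL image is only a stand-in for real input.

import numpy as np
import PIL.Image

# 65x81 RGB dummy image: preprocess() should round both dimensions down to
# multiples of size_divisor (64x64 here) and rescale pixel values to [0, 1].
dummy = PIL.Image.fromarray(np.random.randint(0, 256, (65, 81, 3), dtype=np.uint8))
processor = GLPNImageProcessor(size_divisor=32)
batch = processor.preprocess(dummy, return_tensors="np")
print(batch["pixel_values"][0].shape)  # expected: (3, 64, 64)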
| 486 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
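A hedged sketch of what the `_LazyModule` pattern buys a consumer: importing the package is cheap, and the torch-backed modeling file is only loaded when one of its symbols is first touched (assuming a transformers build that ships pegasus-x).

# Cheap import: only the config module is materialized here.
from transformers.models.pegasus_x import PegasusXConfig

config = PegasusXConfig()
print(config.model_type)  # "pegasus_x"

# The heavy modeling module (and torch) loads lazily on first access.
from transformers.models.pegasus_x import PegasusXModel  # triggers the lazy load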
| 486 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
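For reference, a hedged invocation sketch; every path below is a placeholder for a real TF checkpoint, its JSON config, and an output directory.

# Hypothetical paths, shown only to illustrate the call signature.
convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path="xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt",
    xlnet_config_file="xlnet_cased_L-12_H-768_A-12/xlnet_config.json",
    pytorch_dump_folder_path="./xlnet-pytorch",
    finetuning_task="sst-2",
)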
| 560 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 560 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 49 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim,
                num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 604 | 0 |
def apply_table(inp, table):
    """Apply a permutation table to a bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up an S-box entry: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 54 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
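Outside the test harness, the extractor is typically driven like this; a minimal sketch, where the checkpoint name and the 16 kHz waveform are illustrative.

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# one second of fake 16 kHz audio; a real waveform would come from soundfile/librosa
waveform = np.random.randn(16000).astype(np.float32)
inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt", padding=True)
print(inputs.input_values.shape)  # torch.Size([1, 16000])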
| 189 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue/mrpc dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
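The piece worth isolating from the script above is the `find_executable_batch_size` decorator: on a CUDA out-of-memory error it halves the batch size and re-runs the decorated function from the top. A stripped-down sketch with hypothetical names:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def run_training(batch_size):
    # On OOM, accelerate halves batch_size and calls this function again,
    # so dataloaders, model, and optimizer must all be rebuilt in here.
    print(f"trying batch_size={batch_size}")
    ...  # hypothetical: build dataloaders/model with `batch_size` and train

run_training()  # called with no argument; the decorator supplies batch_size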
| 620 | 0 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_filters_first = dict_first.pop("mel_filters")
        mel_filters_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_filters_first, mel_filters_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_filters_first = dict_first.pop("mel_filters")
        mel_filters_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_filters_first, mel_filters_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 261 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
A__ = import_module("tasks" )
try:
A__ = getattr(A , model_args.task_type )
A__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , A )
# Set seed
set_seed(training_args.seed )
    # Prepare CoNLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
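# Added note: what align_predictions does, on toy shapes. With predictions of
# shape (batch=1, seq_len=2, num_labels=2) such as [[[0.1, 0.9], [0.8, 0.2]]]
# and label_ids [[1, -100]] (the ignore index of nn.CrossEntropyLoss is -100,
# marking padding/subword positions), the argmax gives [[1, 0]]; position 1 is
# skipped, so with label_map {0: "O", 1: "B-PER"} both preds_list and
# out_label_list come out as [["B-PER"]]. seqeval then scores these
# entity-level label lists rather than raw token ids.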
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Discounted present value of a series of cash flows, rounded to 2 places.

    >>> present_value(0.13, [10, 20.70, -293, 297])
    4.69
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
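# Added worked example: a net-present-value style calculation, treating the
# first cash flow as occurring today (i = 0, so it is undiscounted):
#   present_value(0.10, [-1000, 500, 600]) == -1000 + 500/1.1 + 600/1.1**2
# which evaluates to about -49.59, i.e. the investment loses value at a 10%
# discount rate.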
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
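# Added usage sketch (requires network access to the Hub; output comments are
# descriptive, not exact token ids):
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tok("hello world", "second segment")
#   enc["input_ids"]        # [CLS] hello world [SEP] second segment [SEP]
#   enc["token_type_ids"]   # 0s over the first segment (incl. [CLS]/[SEP]), 1s after,
#                           # exactly as create_token_type_ids_from_sequences computes.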
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def _a ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def _a ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> int:
'''simple docstring'''
pass
def _a ( self ) -> str:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase = MaskFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = (self.model_tester.min_size,) * 2
lowercase = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
lowercase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCAmelCase )
lowercase = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def _a ( self ) -> str:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
lowercase = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _a ( self ) -> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
lowercase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = True
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
lowercase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
lowercase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowercase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
lowercase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
lowercase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
lowercase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_lowerCAmelCase )
.eval()
)
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# masks_queries_logits
lowercase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
lowercase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
lowercase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(_lowerCAmelCase )
.eval()
)
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# masks_queries_logits
lowercase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
lowercase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
lowercase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_lowerCAmelCase )
.eval()
)
lowercase = self.default_image_processor
lowercase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowercase = inputs["""pixel_values"""].to(_lowerCAmelCase )
lowercase = [el.to(_lowerCAmelCase ) for el in inputs["""mask_labels"""]]
lowercase = [el.to(_lowerCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
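# Added inference sketch: how the checkpoints exercised by the integration
# tests above are typically used (requires network access; the image path is
# illustrative):
#
#   from PIL import Image
#   from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
#
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
#   inputs = processor(Image.open("cats.png"), return_tensors="pt")
#   outputs = model(**inputs)
#   # masks_queries_logits: (1, num_queries, H/4, W/4); class_queries_logits: (1, num_queries, num_labels + 1)
#   semantic_map = processor.post_process_semantic_segmentation(outputs)[0]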
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-means clustering built on the TensorFlow 1.x graph API (tf.Session,
    tf.placeholder); on TF2 this requires tf.compat.v1 with eager mode disabled."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for _ in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
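# Added equivalent sketch: the same expectation/maximization loop in plain
# NumPy, for readers on TF2 where tf.Session and tf.placeholder no longer
# exist. Names and the fixed iteration count mirror the function above.
import numpy as np


def np_k_means(vectors, k, iterations=100):
    vectors = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # E-step: assign each vector to its nearest centroid
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = dists.argmin(axis=1)
        # M-step: move each centroid to the mean of its assigned vectors
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments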
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm.

    >>> pi(10)
    '3.14159265'
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Largest 1-9 pandigital number expressible as a concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'{solution() = }')
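# Added note on the two multipliers (Project Euler 38): for a 4-digit n whose
# double has 5 digits, concat(n, 2n) = n*10**5 + 2n = 100002*n; for a 3-digit n
# with 3-digit 2n and 3n, concat(n, 2n, 3n) = n*10**6 + 2n*10**3 + 3n
# = 1002003*n. These are the only shapes that can produce 9 digits and beat
# the problem statement's example 918273645 = concat of 9 x (1,2,3,4,5).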
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __snake_case ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
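# Added note: @parameterized_class above instantiates this TestCase once per
# dict in the list, exposing each key as a class attribute -- which is why the
# methods can read self.framework, self.script, self.instance_type and
# self.results without defining them anywhere in the class body.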
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )
    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
snake_case_ : int = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
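# Added example invocation (paths are illustrative):
#   python convert_s2t_checkpoint.py \
#       --fairseq_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted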
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True, )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
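# Added example invocation (paths are illustrative; the encoder/decoder config
# paths fall back to the defaults declared above):
#   python convert_wav2vec2_s2t2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec2_s2t2.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./s2t2-wav2vec2 \
#       --vocab_size 10224 --num_decoder_layers 7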
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3xHxW -> pooled features -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
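
# A minimal sketch of how the pieces above are typically wired together. The
# tokenizer choice and the .jsonl path are illustrative assumptions only:
#
#   from torch.utils.data import DataLoader
#   from transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset(
#       "data/train.jsonl",  # hypothetical path
#       tokenizer,
#       get_image_transforms(),
#       get_mmimdb_labels(),
#       max_seq_length=80,
#   )
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#   text, mask, img, img_start, img_end, tgt = next(iter(loader))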
| 481
| 0
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the index returns enough candidate images.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
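
# Hypothetical invocation (requires network access to the LAION knn service):
#
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200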
| 704
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
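
# A minimal end-to-end sketch of the reader/writer pair exercised above
# (illustrative only, not part of the test suite; run in a scratch directory):
#
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#   ParquetDatasetWriter(ds, "roundtrip.parquet").write()
#   reloaded = ParquetDatasetReader("roundtrip.parquet").read()
#   assert reloaded.column_names == ds.column_names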
| 582
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCamelCase = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
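
# For intuition, the _LazyModule indirection above behaves roughly like a
# module-level __getattr__ that imports a submodule only on first attribute
# access (a simplified sketch, not the actual implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, exported_names in _import_structure.items():
#           if name in exported_names:
#               module = importlib.import_module(f".{submodule}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")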
| 453
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
| 29
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 495
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(ordered_input_names), 1)
        self.assertEqual(len(inputs_args), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
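
# The same conversion entry point can be driven outside the test suite; a
# hypothetical one-off export (model name and output path are placeholders):
#
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert
#
#   convert("pt", "bert-base-cased", Path("onnx/bert-base-cased.onnx"), 12, None)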
| 495
| 1
|
import requests
giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 136
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    """Recursively sort the first ``n`` elements of ``collection`` in place."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble the element at ``index - 1`` forward until it is in order."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
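
# Quick sanity check (illustrative, independent of the interactive prompt above):
#
#   >>> data = [5, 2, 4, 1, 3]
#   >>> rec_insertion_sort(data, len(data))
#   >>> data
#   [1, 2, 3, 4, 5]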
| 136
| 1
|
import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's C++ logging noise

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 719
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
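
# For intuition, _distribute_shards partitions `num_shards` contiguous shard
# indices over at most `max_num_jobs` jobs as evenly as possible. The helper
# below re-derives the expected ranges used in the tests above (an
# illustrative sketch, not the library implementation):
def _even_ranges(num_shards, max_num_jobs):
    num_jobs = min(num_shards, max_num_jobs)
    if num_jobs == 0:
        return []
    quotient, remainder = divmod(num_shards, num_jobs)
    # the first `remainder` jobs receive one extra shard
    starts = [0]
    for job in range(num_jobs):
        starts.append(starts[-1] + quotient + (1 if job < remainder else 0))
    return [range(starts[i], starts[i + 1]) for i in range(num_jobs)]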
| 561
| 0
|
from ..utils import DummyObject, requires_backends
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
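
# For intuition: DummyObject makes any attribute access or instantiation on
# these placeholder classes funnel through requires_backends, which raises an
# informative ImportError when flax is missing. A simplified stand-in (an
# illustrative sketch, not the actual implementation):
#
#   class DummyObject(type):
#       def __getattr__(cls, name):
#           raise ImportError(f"{cls.__name__} requires the flax library, which is not installed.")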
| 106
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)

        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()

        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)

        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 433
| 0
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
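
# For reference, a server loop consistent with the mocks above would look
# roughly like this (an illustrative sketch -- the real implementation lives
# in file_transfer/send_file.py and may differ in details):
#
#   import socket
#
#   def send_file(filename="mytext.txt", testing=False):
#       sock = socket.socket()
#       sock.bind(("localhost", 12312))
#       sock.listen(5)
#       while True:
#           conn, addr = sock.accept()
#           conn.recv(1024)
#           with open(filename, "rb") as in_file:
#               data = in_file.read(1024)
#               while data:
#                   conn.send(data)
#                   data = in_file.read(1024)
#           conn.close()
#           if testing:
#               break
#       sock.shutdown(1)
#       sock.close()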
| 61
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys())
    new_keys = list(our_model.state_dict().keys())
    print(len(og_keys), len(new_keys))
    for i in range(len(og_keys)):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights)

    x = torch.randn((2, 3, 224, 224))
    out1 = from_model(x)
    out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
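
# Example (hypothetical): convert a single variant without pushing to the Hub:
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S --no-push_to_hub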
| 61
| 1
|
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 395
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
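# Hand-traced examples (added for illustration; not part of the original script):
#   replace_key("prime_state_ln.weight") -> "encoder.final_layer_norm.weight"
#   replace_key("y_emb.lyrics")          -> "metadata_embedding.lyrics"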
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
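# Minimal usage sketch (added; `raw_sd` is a hypothetical OpenAI state dict, not a
# name from the original script):
#
#     mapping = {}
#     new_sd = fix_jukebox_keys(raw_sd, model.state_dict(), "vqvae", mapping)
#
# `mapping` is filled with {new_key: original_key} pairs as a side effect; it is what
# gets dumped to mapping.json in convert_openai_checkpoint below.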
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name.split("/")[-1]]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 643
| 0
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 710
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
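# Example (added; not in the original test file): floats_list((2, 3)) returns a
# 2x3 nested list of floats drawn uniformly from [0, scale), using the module-level
# `global_rng` unless an explicit `rng` is passed.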
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 370
| 0
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 660
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 660
| 1
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """
    Project Euler 85: find the area of the grid whose number of contained
    rectangles is closest to ``target``. An a x b grid contains T(a) * T(b)
    rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle number.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 719
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 501
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
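# Explanatory note (added; not in the original module): the _LazyModule assignment
# above replaces this module in sys.modules, so the torch-backed symbols listed in
# _import_structure are only imported when first accessed, keeping the top-level
# `import transformers` fast even when optional backends are missing.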
| 102
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
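    # Added note: in this 4-vertex demo every unit of flow must travel the single
    # path 0 -> 1 -> 2 -> 3, so the printed maximum flow is min(7, 6, 8) = 6.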
| 17
| 0
|
class CircularQueue:
    """Circular FIFO queue with a fixed capacity"""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
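# Illustrative usage (added; not part of the original module):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2)  # enqueue returns self, so calls can be chained
    assert len(queue) == 2
    assert queue.first() == 1
    assert queue.dequeue() == 1  # FIFO order
    assert queue.dequeue() == 2
    assert queue.is_empty()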
| 520
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
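# Hand-traced example (added; not part of the original script):
#   rename_key("patch_embed.0.weight", n)
#   -> "efficientformer.patch_embed.convolution1.weight" for any n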
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 520
| 1
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack."""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 564
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
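# Illustrative usage (added; not part of the original module):
#
#     config = TableTransformerConfig(use_timm_backbone=False)  # falls back to a ResNet backbone config
#     onnx_config = TableTransformerOnnxConfig(config)
#     assert "pixel_values" in onnx_config.inputs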
| 708
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
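# Illustrative usage of the validation above (added; not part of the original module):
#
#     LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
#     LlamaConfig(rope_scaling={"type": "static", "factor": 2.0})  # raises ValueError (unknown type)
#     LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError (factor must be > 1)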
| 604
| 0
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_aim_available,
    is_bf16_available,
    is_bnb_available,
    is_boto3_available,
    is_ccl_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
        prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 212
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_snake_case : Any = None
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case : int = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
_snake_case : str = {
"moussaKam/mbarthez": 10_24,
"moussaKam/barthez": 10_24,
"moussaKam/barthez-orangesum-title": 10_24,
}
_snake_case : Optional[int] = "▁"
class UpperCamelCase_ ( __a ):
'''simple docstring'''
UpperCamelCase : Any = VOCAB_FILES_NAMES
UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Dict = ['''input_ids''', '''attention_mask''']
UpperCamelCase : Optional[Any] = BarthezTokenizer
def __init__( self :str , lowerCAmelCase__ :str=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Union[str, Any]="<s>" , lowerCAmelCase__ :Optional[Any]="</s>" , lowerCAmelCase__ :str="</s>" , lowerCAmelCase__ :List[str]="<s>" , lowerCAmelCase__ :Dict="<unk>" , lowerCAmelCase__ :int="<pad>" , lowerCAmelCase__ :Optional[Any]="<mask>" , **lowerCAmelCase__ :Optional[int] , ) ->int:
# Mask token behave like a normal word, i.e. include the space before it
lowercase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
lowercase = vocab_file
lowercase = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) ->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) ->List[int]:
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
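# A minimal usage sketch (hypothetical: it assumes the class above is the fast
# Barthez tokenizer despite its mangled name, that sentencepiece is installed,
# and that the "moussaKam/barthez" checkpoint from the map above is reachable):
#
#   tokenizer = UpperCamelCase_.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Le camembert est excellent !").input_ids
#   # ids starts with cls_token_id and ends with sep_token_id, matching the
#   # single-sequence branch of the special-tokens builder above.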
| 441
| 0
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
A : List[Any] = 5
A : Optional[int] = 10
@require_sentencepiece
@require_tokenizers
class _lowercase ( __a , unittest.TestCase):
"""simple docstring"""
A__ = SpeechaTextTokenizer
A__ = False
A__ = True
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowerCamelCase__ : List[Any] = sp.SentencePieceProcessor()
spm_model.Load(snake_case__ )
lowerCamelCase__ : Any = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case__ ) )]
lowerCamelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCamelCase__ : Union[str, Any] = Path(self.tmpdirname )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCamelCase__ : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
lowerCamelCase__ : int = "<pad>"
lowerCamelCase__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(snake_case__ ) , 1001 )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCamelCase__ : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [289, 50, 14, 174, 386] , )
lowerCamelCase__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCamelCase__ : Dict = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(snake_case__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCamelCase__ : str = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : str = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class _lowercase ( unittest.TestCase):
"""simple docstring"""
A__ = '''valhalla/s2t_mustc_multilinguial_medium'''
A__ = '''C\'est trop cool'''
A__ = '''Esto es genial'''
@classmethod
def lowerCAmelCase ( cls : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[str] = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
lowerCamelCase__ : Dict = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCamelCase__ : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCamelCase__ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Any = "fr"
lowerCamelCase__ : Any = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , snake_case__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : str = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCamelCase__ : Optional[int] = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 705
|
def lowercase_ ( number : int ):
    """Return True if `number` is automorphic, i.e. number * number ends with the digits of `number`."""
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
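    # A minimal usage sketch for the automorphic-number check above
    # (5 * 5 = 25 and 76 * 76 = 5776 end with the original digits; 7 * 7 = 49
    # does not):
    assert lowercase_(5) is True
    assert lowercase_(76) is True
    assert lowercase_(7) is False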
| 5
| 0
|
'''simple docstring'''
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field ( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    value = int(value )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _convert_distributed_mode ( value ):
    value = int(value )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _convert_dynamo_backend ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    value = int(value )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _convert_sagemaker_distributed_mode ( value ):
    value = int(value )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _convert_yes_no_to_bool ( value ):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter (argparse.RawDescriptionHelpFormatter ):
    # Hides the redundant "<command> [<args>]" stub from subcommand usage lines.
    def _format_usage ( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("<command> [<args>] " , "" )
        return usage
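# A minimal interaction sketch (hypothetical values; the prompt strings are
# illustrative, and BulletMenu reads the choice from the user):
#
#   backend = _ask_options("Which dynamo backend?", DYNAMO_BACKENDS,
#                          _convert_dynamo_backend)
#   use_cpu = _ask_field("Run on CPU only? [yes/NO]: ",
#                        _convert_yes_no_to_bool, default=False)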
| 384
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime ( number : int ) -> bool:
    """Primality check: trial division over candidates of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes > 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums ( n : int ) -> list[int]:
    """Return the first `n` odd composites with no prime + 2*k**2 decomposition."""
    if not isinstance(n , int ):
        raise ValueError("n must be an integer" )
    if n <= 0:
        raise ValueError("n must be >= 0" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution ( ) -> int:
    """Smallest odd composite that is not a prime plus twice a square."""
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
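# Context sketch: this is Goldbach's "other" conjecture (Project Euler 46):
# every odd composite was claimed to equal prime + 2*k**2, and solution()
# returns the smallest odd composite for which no such decomposition exists
# within the precomputed range above (the known answer is 5777).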
| 334
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_A : List[str] ={'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int =['''DPTFeatureExtractor''']
_A : Tuple =['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Union[str, Any] =[
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_A : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
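# Pattern note (a sketch of the upstream lazy-import mechanism, not anything
# DPT-specific): under TYPE_CHECKING the real symbols are imported so static
# analyzers can see them, while at runtime the module entry in sys.modules is
# meant to be replaced by the _LazyModule proxy built above, which imports a
# submodule only when one of its attributes is first accessed.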
| 631
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A : Optional[Any] =logging.get_logger(__name__)
_A : Dict ={'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_A : Tuple ={
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
_A : List[Any] ={
'''gpt-neox-20b''': 2_048,
}
class _lowercase ( _lowercase ):
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["""input_ids""", """attention_mask"""]
def __init__( self: Optional[int] , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: int=None , UpperCamelCase__: Tuple=None , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Any="<|endoftext|>" , UpperCamelCase__: Union[str, Any]="<|endoftext|>" , UpperCamelCase__: Tuple=False , **UpperCamelCase__: str , ):
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase__ ) != add_prefix_space:
lowerCamelCase__ : Any = getattr(UpperCamelCase__ , pre_tok_state.pop("""type""" ) )
lowerCamelCase__ : Dict = add_prefix_space
lowerCamelCase__ : Optional[int] = pre_tok_class(**UpperCamelCase__ )
lowerCamelCase__ : Dict = add_prefix_space
def lowerCamelCase_ ( self: int , UpperCamelCase__: str , UpperCamelCase__: Optional[str] = None ):
lowerCamelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: "Conversation" ):
lowerCamelCase__ : str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
lowerCamelCase__ : int = input_ids[-self.model_max_length :]
return input_ids
| 631
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowercase : int = logging.get_logger(__name__)
lowercase : List[Any] = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = '''layoutlmv3'''
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Dict=5_0_2_6_5 , SCREAMING_SNAKE_CASE : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE : Dict=1_2 , SCREAMING_SNAKE_CASE : List[str]=1_2 , SCREAMING_SNAKE_CASE : Any=3_0_7_2 , SCREAMING_SNAKE_CASE : int="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Dict=0.0_2 , SCREAMING_SNAKE_CASE : Dict=1E-5 , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Optional[int]=0 , SCREAMING_SNAKE_CASE : List[Any]=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1_0_2_4 , SCREAMING_SNAKE_CASE : List[str]=1_2_8 , SCREAMING_SNAKE_CASE : List[str]=1_2_8 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Any=3_2 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_2_8 , SCREAMING_SNAKE_CASE : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE : List[Any]=2_5_6 , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=2_2_4 , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : int=1_6 , SCREAMING_SNAKE_CASE : Any=None , **SCREAMING_SNAKE_CASE : Dict , ) -> Any:
"""simple docstring"""
super().__init__(
vocab_size=__snake_case , hidden_size=__snake_case , num_hidden_layers=__snake_case , num_attention_heads=__snake_case , intermediate_size=__snake_case , hidden_act=__snake_case , hidden_dropout_prob=__snake_case , attention_probs_dropout_prob=__snake_case , max_position_embeddings=__snake_case , type_vocab_size=__snake_case , initializer_range=__snake_case , layer_norm_eps=__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , )
lowerCAmelCase = max_ad_position_embeddings
lowerCAmelCase = coordinate_size
lowerCAmelCase = shape_size
lowerCAmelCase = has_relative_attention_bias
lowerCAmelCase = rel_pos_bins
lowerCAmelCase = max_rel_pos
lowerCAmelCase = has_spatial_attention_bias
lowerCAmelCase = rel_ad_pos_bins
lowerCAmelCase = max_rel_ad_pos
lowerCAmelCase = text_embed
lowerCAmelCase = visual_embed
lowerCAmelCase = input_size
lowerCAmelCase = num_channels
lowerCAmelCase = patch_size
lowerCAmelCase = classifier_dropout
class _lowerCAmelCase ( lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase = version.parse('1.12' )
@property
def __A ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
def __A ( self : Tuple ) -> float:
"""simple docstring"""
return 1E-5
@property
def __A ( self : Any ) -> int:
"""simple docstring"""
return 1_2
def __A ( self : Tuple , SCREAMING_SNAKE_CASE : "ProcessorMixin" , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : int = 4_0 , SCREAMING_SNAKE_CASE : int = 4_0 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , "apply_ocr" , __snake_case )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase = processor.tokenizer.num_special_tokens_to_add(__snake_case )
lowerCAmelCase = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowerCAmelCase = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowerCAmelCase = self._generate_dummy_images(__snake_case , __snake_case , __snake_case , __snake_case )
lowerCAmelCase = dict(
processor(
__snake_case , text=__snake_case , boxes=__snake_case , return_tensors=__snake_case , ) )
return inputs
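# Shape sketch for the dummy inputs assembled above (hedged; the exact keys
# come from the processor): with dynamic axes, compute_effective_axis_dimension
# falls back to the OnnxConfig fixed defaults, so the batch holds identical
# samples of unk-token text, one [48, 84, 73, 128] bounding box each, and
# generated dummy images sized num_channels x image_height x image_width.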
| 649
|
import argparse
import datetime
def zeller ( date_input : str ) -> str:
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = F'Your date {date_input}, is a {days[str(f )]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Any = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
snake_case : str = parser.parse_args()
zeller(args.date_input)
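# Example run (hypothetical file name; Zeller's congruence on a known date):
#   $ python zeller_congruence.py 01-31-2010
#   Your date 01-31-2010, is a Sunday!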
| 335
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''xlm-roberta-xl'''
def __init__( self : Any , _UpperCAmelCase : Optional[int]=250880 , _UpperCAmelCase : int=2560 , _UpperCAmelCase : Optional[Any]=36 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Optional[int]=10240 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=514 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Tuple=1e-05 , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : int=2 , _UpperCAmelCase : Optional[int]="absolute" , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Tuple , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 720
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14
| 0
|
"""simple docstring"""
def _A () -> int:
"""simple docstring"""
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
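# For reference: the only Pythagorean triplet with a + b + c == 1000 is
# (200, 375, 425), so the expression above evaluates to 31_875_000.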
| 512
|
"""simple docstring"""
def _A (input_str ) -> str:
    """Return the words of `input_str` in reverse order."""
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
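    # A minimal usage sketch for the word-reversal helper above:
    assert _A("I am a BOY") == "BOY a am I"
    assert _A("   ") == ""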
| 512
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve ( ) -> Generator[int, None, None]:
    """Infinite incremental sieve of Eratosthenes."""
    factor_map : dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution ( limit : float = 1E10 ) -> int:
    """Project Euler 123: least n whose remainder 2 * n * p_n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
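# Why every other prime can be skipped above (a sketch of the standard
# argument): by the binomial theorem, (p - 1)**n + (p + 1)**n is congruent to
# 2*n*p (mod p**2) when n is odd and to 2 when n is even, so the remainder
# only grows along odd n and the even steps never matter.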
if __name__ == "__main__":
print(solution())
| 460
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = """ylacombe/bark-small"""
a_ : Dict = tempfile.mkdtemp()
a_ : Union[str, Any] = """en_speaker_1"""
a_ : Dict = """This is a test string"""
a_ : Optional[int] = """speaker_embeddings_path.json"""
a_ : int = """speaker_embeddings"""
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = self.get_tokenizer()
a_ : Optional[Any] = BarkProcessor(tokenizer=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
a_ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
a_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a_ : Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
a_ : Dict = 35
a_ : List[Any] = 2
a_ : Optional[int] = 8
a_ : int = {
"""semantic_prompt""": np.ones(lowerCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
a_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase_ )
a_ : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
a_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase_ , **lowerCAmelCase_ )
a_ : Any = processor(text=self.input_string , voice_preset=lowerCAmelCase_ )
a_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
a_ : Any = processor(text=self.input_string , voice_preset=self.voice_preset )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.get_tokenizer()
a_ : Union[str, Any] = BarkProcessor(tokenizer=lowerCAmelCase_ )
a_ : Optional[int] = processor(text=self.input_string )
a_ : Optional[int] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 460
| 1
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(snake_case__ )
class _SCREAMING_SNAKE_CASE ( snake_case__ ):
def __init__( self : Optional[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Tuple ):
"""simple docstring"""
super().__init__(*__lowercase , **__lowercase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _UpperCAmelCase ( self : List[str] , snake_case_ : int=None , snake_case_ : List[Any]=None , snake_case_ : List[str]=None ):
"""simple docstring"""
A : List[Any] = {}
A : Any = {}
if prompt is not None:
A : int = prompt
if generate_kwargs is not None:
A : int = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
A : List[Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
A : int = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : int , snake_case_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **snake_case_ : Union[str, Any] ):
"""simple docstring"""
return super().__call__(__lowercase , **__lowercase )
def _UpperCAmelCase ( self : int , snake_case_ : Union[str, Any] , snake_case_ : int=None ):
"""simple docstring"""
A : Any = load_image(__lowercase )
if prompt is not None:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(
f"""Received an invalid text input, got - {type(__lowercase )} - but expected a single string. """
'''Note also that one single text can be provided for conditional image to text generation.''' )
A : Optional[Any] = self.model.config.model_type
if model_type == "git":
A : Dict = self.image_processor(images=__lowercase , return_tensors=self.framework )
A : int = self.tokenizer(text=__lowercase , add_special_tokens=__lowercase ).input_ids
A : Optional[int] = [self.tokenizer.cls_token_id] + input_ids
A : Any = torch.tensor(__lowercase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
A : List[str] = self.image_processor(images=__lowercase , header_text=__lowercase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
A : List[Any] = self.image_processor(images=__lowercase , return_tensors=self.framework )
A : Union[str, Any] = self.tokenizer(__lowercase , return_tensors=self.framework )
model_inputs.update(__lowercase )
else:
raise ValueError(f"""Model type {model_type} does not support conditional text generation""" )
else:
A : Union[str, Any] = self.image_processor(images=__lowercase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
A : Optional[Any] = None
return model_inputs
def _UpperCAmelCase ( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Any=None ):
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __lowercase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
A : Optional[Any] = None
if generate_kwargs is None:
A : List[Any] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
A : Tuple = model_inputs.pop(self.model.main_input_name )
A : List[Any] = self.model.generate(__lowercase , **__lowercase , **__lowercase )
return model_outputs
def _UpperCAmelCase ( self : Union[str, Any] , snake_case_ : Optional[int] ):
"""simple docstring"""
A : List[str] = []
for output_ids in model_outputs:
A : Union[str, Any] = {
'''generated_text''': self.tokenizer.decode(
__lowercase , skip_special_tokens=__lowercase , )
}
records.append(__lowercase )
return records
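# A minimal usage sketch (hypothetical wiring; assumes vision extras and a
# captioning checkpoint such as "Salesforce/blip-image-captioning-base"):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#   captioner("cat.png")  # -> [{"generated_text": "a cat sitting on ..."}]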
| 256
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
    exp_x = torch.exp(SCREAMING_SNAKE_CASE )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(SCREAMING_SNAKE_CASE * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
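    # Derivation sketch: with p_i = exp(x_i) / A (the softmax weights), the
    # Shannon entropy -sum_i p_i * log(p_i) expands to
    # log(A) - (1 / A) * sum_i x_i * exp(x_i), i.e. log(A) - B / A as returned.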
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
def __init__( self :List[str] , __lowercase :int ):
super().__init__()
__lowerCamelCase : str =config.output_attentions
__lowerCamelCase : List[Any] =config.output_hidden_states
__lowerCamelCase : Dict =nn.ModuleList([BertLayer(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : str =nn.ModuleList([BertHighway(__lowercase ) for _ in range(config.num_hidden_layers )] )
__lowerCamelCase : Optional[Any] =[-1 for _ in range(config.num_hidden_layers )]
def __lowercase ( self :Union[str, Any] , __lowercase :Union[str, Any] ):
if (type(__lowercase ) is float) or (type(__lowercase ) is int):
for i in range(len(self.early_exit_entropy ) ):
__lowerCamelCase : Tuple =x
else:
__lowerCamelCase : Any =x
def __lowercase ( self :Union[str, Any] , __lowercase :Tuple ):
__lowerCamelCase : Union[str, Any] =pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __lowercase ( self :Tuple , __lowercase :Optional[int] , __lowercase :Dict=None , __lowercase :Union[str, Any]=None , __lowercase :List[str]=None , __lowercase :str=None , ):
__lowerCamelCase : Any =()
__lowerCamelCase : List[str] =()
__lowerCamelCase : Optional[int] =()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
__lowerCamelCase : int =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =layer_module(
__lowercase , __lowercase , head_mask[i] , __lowercase , __lowercase )
__lowerCamelCase : Optional[int] =layer_outputs[0]
if self.output_attentions:
__lowerCamelCase : Optional[Any] =all_attentions + (layer_outputs[1],)
__lowerCamelCase : Any =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =current_outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Dict =current_outputs + (all_attentions,)
__lowerCamelCase : str =self.highway[i](__lowercase )
# logits, pooled_output
if not self.training:
__lowerCamelCase : Tuple =highway_exit[0]
__lowerCamelCase : Tuple =entropy(__lowercase )
__lowerCamelCase : Tuple =highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__lowerCamelCase : Optional[int] =all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__lowerCamelCase : Dict =(highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowercase , i + 1 )
else:
__lowerCamelCase : Union[str, Any] =all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__lowerCamelCase : Optional[Any] =all_hidden_states + (hidden_states,)
__lowerCamelCase : List[Any] =(hidden_states,)
if self.output_hidden_states:
__lowerCamelCase : Tuple =outputs + (all_hidden_states,)
if self.output_attentions:
__lowerCamelCase : Optional[int] =outputs + (all_attentions,)
__lowerCamelCase : int =outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , snake_case__ , )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :str ):
super().__init__(__lowercase )
__lowerCamelCase : Union[str, Any] =config
__lowerCamelCase : List[str] =BertEmbeddings(__lowercase )
__lowerCamelCase : Dict =DeeBertEncoder(__lowercase )
__lowerCamelCase : List[Any] =BertPooler(__lowercase )
self.init_weights()
def __lowercase ( self :Tuple ):
self.encoder.init_highway_pooler(self.pooler )
def __lowercase ( self :Dict ):
return self.embeddings.word_embeddings
def __lowercase ( self :List[str] , __lowercase :int ):
__lowerCamelCase : Union[str, Any] =value
def __lowercase ( self :List[Any] , __lowercase :Dict ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__lowercase )
@add_start_docstrings_to_model_forward(__lowercase )
def __lowercase ( self :Optional[Any] , __lowercase :List[str]=None , __lowercase :List[Any]=None , __lowercase :Any=None , __lowercase :Tuple=None , __lowercase :Union[str, Any]=None , __lowercase :Optional[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Tuple=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowerCamelCase : List[str] =input_ids.size()
elif inputs_embeds is not None:
__lowerCamelCase : str =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowerCamelCase : Optional[int] =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowerCamelCase : str =torch.ones(__lowercase , device=__lowercase )
if encoder_attention_mask is None:
__lowerCamelCase : Tuple =torch.ones(__lowercase , device=__lowercase )
if token_type_ids is None:
__lowerCamelCase : List[Any] =torch.zeros(__lowercase , dtype=torch.long , device=__lowercase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowerCamelCase : torch.Tensor =self.get_extended_attention_mask(__lowercase , __lowercase , __lowercase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__lowerCamelCase : List[str] =encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__lowerCamelCase : Any =encoder_attention_mask[:, None, None, :]
__lowerCamelCase : Optional[Any] =encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
__lowerCamelCase : List[str] =(1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowerCamelCase : Union[str, Any] =self.get_head_mask(__lowercase , self.config.num_hidden_layers )
__lowerCamelCase : str =self.embeddings(
input_ids=__lowercase , position_ids=__lowercase , token_type_ids=__lowercase , inputs_embeds=__lowercase )
__lowerCamelCase : Dict =self.encoder(
__lowercase , attention_mask=__lowercase , head_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__lowerCamelCase : int =encoder_outputs[0]
__lowerCamelCase : Tuple =self.pooler(__lowercase )
__lowerCamelCase : int =(
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :List[Any] , __lowercase :Optional[Any] , __lowercase :Dict ):
__lowerCamelCase : List[Any] =message
__lowerCamelCase : int =exit_layer # start from 1!
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
"""simple docstring"""
def __init__( self :Any , __lowercase :str ):
super().__init__()
__lowerCamelCase : str =BertPooler(__lowercase )
__lowerCamelCase : Union[str, Any] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : List[str] =nn.Linear(config.hidden_size , config.num_labels )
def __lowercase ( self :Union[str, Any] , __lowercase :List[str] ):
# Pooler
__lowerCamelCase : Optional[Any] =encoder_outputs[0]
__lowerCamelCase : Any =self.pooler(__lowercase )
# "return" pooler_output
# BertModel
__lowerCamelCase : List[str] =(pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__lowerCamelCase : List[Any] =bmodel_output[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : int =self.classifier(__lowercase )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , snake_case__ , )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , __lowercase :Dict ):
super().__init__(__lowercase )
__lowerCamelCase : Any =config.num_labels
__lowerCamelCase : int =config.num_hidden_layers
__lowerCamelCase : Tuple =DeeBertModel(__lowercase )
__lowerCamelCase : Optional[int] =nn.Dropout(config.hidden_dropout_prob )
__lowerCamelCase : Optional[int] =nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__lowercase )
def __lowercase ( self :List[str] , __lowercase :List[str]=None , __lowercase :str=None , __lowercase :Optional[Any]=None , __lowercase :List[Any]=None , __lowercase :Union[str, Any]=None , __lowercase :Dict=None , __lowercase :int=None , __lowercase :int=-1 , __lowercase :List[str]=False , ):
__lowerCamelCase : Union[str, Any] =self.num_layers
try:
__lowerCamelCase : Union[str, Any] =self.bert(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , position_ids=__lowercase , head_mask=__lowercase , inputs_embeds=__lowercase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__lowerCamelCase : List[Any] =outputs[1]
__lowerCamelCase : Optional[Any] =self.dropout(__lowercase )
__lowerCamelCase : Tuple =self.classifier(__lowercase )
__lowerCamelCase : int =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCamelCase : Union[str, Any] =e.message
__lowerCamelCase : Optional[Any] =e.exit_layer
__lowerCamelCase : Any =outputs[0]
if not self.training:
__lowerCamelCase : List[Any] =entropy(__lowercase )
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Union[str, Any] =MSELoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : Dict =CrossEntropyLoss()
__lowerCamelCase : List[Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
__lowerCamelCase : str =[]
for highway_exit in outputs[-1]:
__lowerCamelCase : List[str] =highway_exit[0]
if not self.training:
highway_logits_all.append(__lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowerCamelCase : Optional[int] =MSELoss()
__lowerCamelCase : Optional[Any] =loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
__lowerCamelCase : int =CrossEntropyLoss()
__lowerCamelCase : int =loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__lowercase )
if train_highway:
__lowerCamelCase : Dict =(sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowerCamelCase : List[str] =(loss,) + outputs
if not self.training:
__lowerCamelCase : List[Any] =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCamelCase : Dict =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 179
| 0
|
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
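# To run just these integration tests (illustrative invocation; the exact file
# path depends on the repository layout):
#
#     pytest -m integration tests/test_inspect.py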
| 123
|
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the Euclidean distance between two vectors, using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Calculate the same distance in pure Python, without numpy."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark the pure-Python and numpy implementations against each other."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
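# Worked example (illustrative): for the vectors used in the benchmark above,
#     euclidean_distance([1, 2, 3], [4, 5, 6])
#         = sqrt((4 - 1) ** 2 + (5 - 2) ** 2 + (6 - 3) ** 2)
#         = sqrt(27) ≈ 5.196,
# and euclidean_distance_no_np returns the same value.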
| 123
| 1
|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
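# Usage sketch (illustrative; assumes the public transformers API exposes these
# classes under the names reconstructed above):
#
#     from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel
#
#     configuration = RobertaPreLayerNormConfig()      # default hyperparameters
#     model = RobertaPreLayerNormModel(configuration)  # randomly initialized weights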
| 137
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
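# Usage sketch (illustrative): building a 4-bit NF4 configuration and checking
# the derived quantization method. The keyword names follow the class as
# reconstructed above; exact behavior depends on the installed transformers
# and bitsandbytes versions.
#
#     config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_use_double_quant=True,
#         bnb_4bit_compute_dtype="bfloat16",
#     )
#     assert config.quantization_method() == "nf4"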
| 137
| 1
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            '''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
            '''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
            '''answers''': datasets.Sequence(
                {
                    '''text''': datasets.Value('''string''' ),
                    '''answer_start''': datasets.Value('''int32''' ),
                } ),
            '''id''': datasets.Value('''int64''' ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            '''tokens''': [['''foo'''] * 5] * n,
            '''labels''': [[1] * 5] * n,
            '''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
            '''id''': list(range(n ) ),
        } , features=features , )
    return dataset
@pytest.fixture(scope='''session''' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
snake_case__ : List[Any] =tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
snake_case__ : Optional[int] =FILE_CONTENT
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
    import bz2
snake_case__ : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
snake_case__ : str =bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
    with bz2.open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
import gzip
snake_case__ : Union[str, Any] =str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
snake_case__ : int =bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
with gzip.open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
snake_case__ : Tuple =tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
snake_case__ : Tuple =bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
    with lz4.frame.open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
snake_case__ : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
    with py7zr.SevenZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as archive:
archive.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
import tarfile
snake_case__ : Any =tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
import lzma
snake_case__ : List[Any] =tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
snake_case__ : List[str] =bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
with lzma.open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
import zipfile
snake_case__ : Tuple =tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case__ : str =tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
snake_case__ : Dict =bytes(SCREAMING_SNAKE_CASE , '''utf-8''' )
with zstd.open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
snake_case__ : int =tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
snake_case__ : Any =textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='''session''' )
def lowercase_ ( ):
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
snake_case__ : Dict =datasets.Dataset.from_dict(SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
snake_case__ : List[str] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(SCREAMING_SNAKE_CASE ) ) as con:
snake_case__ : Optional[int] =con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : Tuple =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , newline='''''' ) as f:
snake_case__ : Any =csv.DictWriter(SCREAMING_SNAKE_CASE , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
snake_case__ : Any =str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , newline='''''' ) as f:
snake_case__ : Optional[int] =csv.DictWriter(SCREAMING_SNAKE_CASE , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
    import bz2
snake_case__ : List[str] =tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f:
snake_case__ : Dict =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : List[Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
snake_case__ : str =tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
snake_case__ : str =tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
snake_case__ : Union[str, Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
snake_case__ : Tuple =pa.schema(
{
'''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
} )
with open(SCREAMING_SNAKE_CASE , '''wb''' ) as f:
snake_case__ : Any =pq.ParquetWriter(SCREAMING_SNAKE_CASE , schema=SCREAMING_SNAKE_CASE )
snake_case__ : Any =pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(SCREAMING_SNAKE_CASE ) )] for k in DATA[0]} , schema=SCREAMING_SNAKE_CASE )
writer.write_table(SCREAMING_SNAKE_CASE )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
snake_case__ : str =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
snake_case__ : List[str] ={'''data''': DATA}
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
snake_case__ : int =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
snake_case__ : Any ={'''data''': DATA_DICT_OF_LISTS}
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
snake_case__ : Any =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : Optional[int] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
snake_case__ : Tuple =str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
snake_case__ : List[str] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
import gzip
snake_case__ : List[Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE , '''wb''' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
import gzip
snake_case__ : str =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as orig_file:
with gzip.open(SCREAMING_SNAKE_CASE , '''wb''' ) as zipped_file:
zipped_file.writelines(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
snake_case__ : Optional[int] =tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
snake_case__ : Optional[Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('''nested''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
snake_case__ : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : Optional[Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
snake_case__ : int =tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.add(SCREAMING_SNAKE_CASE , arcname=os.path.join('''nested''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
snake_case__ : Dict =['''0''', '''1''', '''2''', '''3''']
snake_case__ : List[Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
snake_case__ : Tuple =['''0''', '''1''', '''2''', '''3''']
snake_case__ : List[Any] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
snake_case__ : List[str] =['''0''', '''1''', '''2''', '''3''']
snake_case__ : Union[str, Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(SCREAMING_SNAKE_CASE , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
snake_case__ : Any =tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
snake_case__ : Optional[Any] =tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.join('''main_dir''' , os.path.basename(SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
snake_case__ : Any =tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
snake_case__ : Tuple ='''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
snake_case__ : Optional[int] =str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope='''session''' )
def lowercase_ ( ):
"""simple docstring"""
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def lowercase_ ( ):
"""simple docstring"""
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def lowercase_ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
snake_case__ : Optional[int] =tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ) )
f.write(SCREAMING_SNAKE_CASE , arcname=os.path.basename(SCREAMING_SNAKE_CASE ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def data_dir(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('''data_dir''' )

    (data_dir / "subdir").mkdir()
    with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
        f.write('''foo\n''' * 10 )
    with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
        f.write('''bar\n''' * 10 )
    # hidden file
    with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
        f.write('''bar\n''' * 10 )

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
        f.write('''foo\n''' * 10 )
    with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
        f.write('''bar\n''' * 10 )

    return data_dir
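# Illustrative (hypothetical) test showing how these session-scoped fixtures
# compose. The fixture name ``csv_path`` assumes the CSV fixture above carries
# that name, which the obfuscated definitions in this file do not guarantee:
#
#     def test_csv_fixture_roundtrip(csv_path):
#         import csv as csv_module
#         with open(csv_path, newline="") as f:
#             rows = list(csv_module.DictReader(f))
#         assert len(rows) == len(DATA)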
| 710
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
| 408
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class __a ( __a ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = """luke"""
def __init__( self , _lowerCamelCase=50_267 , _lowerCamelCase=500_000 , _lowerCamelCase=768 , _lowerCamelCase=256 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3_072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , **_lowerCamelCase , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
__lowercase = vocab_size
__lowercase = entity_vocab_size
__lowercase = hidden_size
__lowercase = entity_emb_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = use_entity_aware_attention
__lowercase = classifier_dropout
| 118
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_lowercase = pd.read_csv('''sample_data.csv''', header=None)
_lowercase = df.shape[:1][0]
# If you're using some other dataset input the target column
_lowercase = df.iloc[:, 1:2]
_lowercase = actual_data.values.reshape(len_data, 1)
_lowercase = MinMaxScaler().fit_transform(actual_data)
_lowercase = 10
_lowercase = 5
_lowercase = 20
_lowercase = len_data - periods * look_back
_lowercase = actual_data[:division]
_lowercase = actual_data[division - look_back :]
_lowercase , _lowercase = [], []
_lowercase , _lowercase = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_lowercase = np.array(train_x)
_lowercase = np.array(test_x)
_lowercase = np.array([list(i.ravel()) for i in train_y])
_lowercase = np.array([list(i.ravel()) for i in test_y])
_lowercase = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
_lowercase = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_lowercase = model.predict(x_test)
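    # Shape check (illustrative): with look_back = 10 and forward_days = 5,
    # every sample in x_train is a (10, 1) window of scaled prices and every
    # row of y_train is the flattened 5-step horizon, so
    # x_train.shape == (len(train_x), 10, 1) and y_train.shape == (len(train_y), 5).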
| 118
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : Any = logging.get_logger(__name__)
def A__ ( A_ , A_=False ) -> Dict:
_lowercase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowercase = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A__ ( A_ , A_ , A_=False ) -> Optional[int]:
for i in range(config.num_hidden_layers ):
if base_model:
_lowercase = ""
else:
_lowercase = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[
: config.hidden_size, :
]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def A__ ( A_ , A_ , A_ ) -> str:
_lowercase = dct.pop(A_ )
_lowercase = val
def A__ ( ) -> Union[str, Any]:
_lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowercase = Image.open(requests.get(A_ , stream=A_ ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all DeiT models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
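# --- Illustrative sketch (not part of the original script) ---
# Once converted, the checkpoint behaves like any other 🤗 model. The folder
# path below is a hypothetical placeholder:
def _demo_load_converted(folder="path/to/deit_dump"):
    model = DeiTForImageClassificationWithTeacher.from_pretrained(folder)
    image_processor = DeiTImageProcessor.from_pretrained(folder)
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])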
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
"""Convert MobileNetV1 checkpoints from the tensorflow/models library."""

import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
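# --- Illustrative sketch (not part of the original script) ---
# The regex above parses the depth multiplier and the input resolution out of
# the model name. A quick self-contained check of that parsing:
def _demo_parse_name(model_name="mobilenet_v1_1.0_224"):
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    assert matches is not None
    assert float(matches[1]) == 1.0 and int(matches[2]) == 224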
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1_001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding='longest',
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
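# --- Illustrative sketch (not part of the original example) ---
# `pad_to_multiple_of=8` rounds the padded length up to the next multiple of 8,
# which keeps tensor shapes friendly to fp16/bf16 Tensor Core kernels; e.g. a
# longest sequence of 13 tokens gets padded to 16:
def _demo_pad_to_multiple_of():
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    batch = tokenizer(['a short sentence', 'a somewhat longer example sentence here'])
    padded = tokenizer.pad(batch, padding='longest', pad_to_multiple_of=8, return_tensors='pt')
    assert padded['input_ids'].shape[1] % 8 == 0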
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch['labels']))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
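# --- Illustrative sketch (not part of the original example) ---
# The manual truncation above is exactly what `Accelerator.gather_for_metrics`
# automates: it gathers across processes and drops the duplicated samples from
# the last batch. The eval-loop body could be reduced to:
#
#     predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
#     metric.add_batch(predictions=predictions, references=references)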
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
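# --- Illustrative sketch (not part of the original module) ---
# Instantiating the deprecated class still works but emits a FutureWarning;
# new code should construct the processor directly:
def _demo_migration():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        CLIPFeatureExtractor()  # emits the deprecation warning
    assert any(issubclass(w.category, FutureWarning) for w in caught)
    _ = CLIPImageProcessor()  # preferred, warning-free equivalent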
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max=True,
    max_x=math.inf,
    min_x=-math.inf,
    max_y=math.inf,
    min_y=-math.inf,
    visualization=False,
    start_temperate=100,
    rate_of_decrease=0.01,
    threshold_temp=1,
):
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
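# --- Illustrative sketch (not part of the original module) ---
# The Metropolis-style rule above accepts a worsening move (change < 0) with
# probability e^(change / current_temp), so bad moves are likely while the
# system is hot and rare once it has cooled:
def _demo_acceptance_probability():
    change = -5.0  # a move that worsens the score by 5
    assert math.e ** (change / 100) > 0.95  # hot: almost always accepted
    assert math.e ** (change / 1) < 0.01  # cold: almost never accepted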
if __name__ == "__main__":
def __lowercase ( _a , _a ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowercase__ : Dict = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase__ : Union[str, Any] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
lowercase__ : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase__ : int = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def __lowercase ( _a , _a ):
return (3 * x**2) - (6 * y)
lowercase__ : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase__ : Dict = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
lowercase__ : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase__ : Tuple = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'{local_min.score()}'
)
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
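# --- Illustrative sketch (not part of the original module) ---
# Typical wiring of this trainer in a QA fine-tuning script; the dataset and
# post-processing callables below are hypothetical placeholders:
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=64, num_beams=4)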
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : int = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
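# --- Illustrative sketch (not part of the original module) ---
# With the lazy module registered in sys.modules, importing the package is
# cheap: heavy submodules are only loaded when an attribute is first accessed:
#
#     from transformers.models.xlnet import XLNetConfig  # triggers only configuration_xlnet
#     config = XLNetConfig()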
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    # convert bytes to megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CodeGen model."""

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
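# --- Illustrative sketch (not part of the original module) ---
# Generating ONNX dummy inputs with the config above; the tokenizer checkpoint
# is one of the CodeGen checkpoints listed earlier:
def _demo_dummy_inputs():
    from transformers import AutoTokenizer

    config = CodeGenConfig()
    onnx_config = CodeGenOnnxConfig(config)
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
    assert set(dummy.keys()) == {"input_ids", "attention_mask"}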
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the given input sample."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
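# --- Illustrative sketch (not part of the original module) ---
# Any object with a `process(sample) -> float` method satisfies the protocol;
# a trivial pass-through filter has unit gain (0 dB) everywhere:
class _IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


# show_frequency_response(_IdentityFilter(), samplerate=48_000)
# show_phase_response(_IdentityFilter(), samplerate=48_000)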
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax_tiny(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = sd_pipe.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat16(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat16_with_safety(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
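# --- Illustrative sketch (not part of the original tests) ---
# The replicate/shard pattern used throughout: parameters are replicated once
# per device, inputs are split along the batch axis, and `jit=True` runs the
# pipeline under `pmap`. The same dance in isolation:
#
#     params = replicate(params)                              # one copy per device
#     prng_seed = jax.random.split(prng_seed, jax.device_count())
#     prompt_ids = shard(prompt_ids)                          # leading dim -> (devices, batch/devices, ...)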
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
    model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__a, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = None
            # the below check should work, but it currently doesn't (mostly a PyTorch issue);
            # in the meantime we disable it, at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_ = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
SCREAMING_SNAKE_CASE_ = (
os.path.join(__a, F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
SCREAMING_SNAKE_CASE_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict(), optimizer_key='''optimizer''', storage_reader=dist_cp.FileSystemReader(__a ), )
SCREAMING_SNAKE_CASE_ = optim_state['''optimizer''']
logger.info(F'Optimizer loaded from {ckpt_dir}' )
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict_to_load(__a, __a, __a )
optimizer.load_state_dict(__a )
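# --- Hedged sketch: the helpers above dispatch on `fsdp_plugin.state_dict_type`.
# This is a minimal, illustrative version of the underlying PyTorch FSDP API they
# wrap for the FULL_STATE_DICT branch. It assumes an already-wrapped FSDP `model`
# inside an initialized process group; `save_dir` and `rank` are placeholders.
import os
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import FullStateDictConfig, StateDictType

def save_full_state_dict_sketch(model: FSDP, save_dir: str, rank: int) -> None:
    # Gather a full (unsharded) state dict, materialized on CPU and only on rank 0,
    # mirroring the FULL_STATE_DICT branch of the saver above.
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
        state_dict = model.state_dict()
    if rank == 0:
        os.makedirs(save_dir, exist_ok=True)
        torch.save(state_dict, os.path.join(save_dir, "model.bin"))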
| 626
| 0
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
__lowerCamelCase = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = Github(os.environ['GITHUB_TOKEN'] )
A__ = g.get_repo('huggingface/transformers' )
A__ = repo.get_issues(state='open' )
for issue in open_issues:
A__ = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCamelCase__ : i.created_at , reverse=UpperCamelCase__ )
A__ = comments[0] if len(UpperCamelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
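# --- Hedged sketch: the same close/stale decision logic factored into pure
# functions so it can be unit-tested without the GitHub API. `last_commenter`,
# `updated_days`, and `created_days` are placeholder inputs; the thresholds
# mirror the script above, but the factoring itself is an assumption.
def should_close(last_commenter, updated_days, created_days, labels):
    return (
        last_commenter == "github-actions[bot]"
        and updated_days > 7
        and created_days >= 30
        and not any(l.lower() in LABELS_TO_EXEMPT for l in labels)
    )

def should_mark_stale(updated_days, created_days, labels):
    return (
        updated_days > 23
        and created_days >= 30
        and not any(l.lower() in LABELS_TO_EXEMPT for l in labels)
    )

assert should_close("github-actions[bot]", 8, 31, ["bug"])
assert not should_mark_stale(10, 31, ["wip"])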
| 713
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCamelCase = "."
if __name__ == "__main__":
__lowerCamelCase = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
__lowerCamelCase = []
__lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
__lowerCamelCase = line.strip()
__lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
__lowerCamelCase = "\n".join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 536
| 0
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"""vocab_file""": """spiece.model"""}
_UpperCamelCase = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
_UpperCamelCase = {
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class __magic_name__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase = None , **lowerCamelCase , ):
'''simple docstring'''
__A : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
__A : Optional[int] = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
__A : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__A : List[Any] = "<|endoftext|>" if eos_token is None else eos_token
__A : Tuple = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__A : Optional[int] = unk_token if pad_token is None else pad_token
__A : List[Any] = eos_token if bos_token is None else bos_token
else:
__A : Tuple = "<pad>" if pad_token is None else pad_token
__A : Optional[Any] = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCamelCase , remove_space=lowerCamelCase , keep_accents=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
__A : str = do_lower_case
__A : str = remove_space
__A : str = keep_accents
__A : Optional[int] = vocab_file
__A : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# Used for whitespace normalization in input texts
        # fmt: off
__A : str = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__A : int = re.compile(
f"[{''.join(map(lowerCamelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
def __getstate__( self ):
'''simple docstring'''
__A : Optional[Any] = self.__dict__.copy()
__A : List[Any] = None
return state
def __setstate__( self , lowerCamelCase ):
'''simple docstring'''
__A : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__A : Tuple = {}
__A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Any = self.non_printing_characters_re.sub("" , lowerCamelCase )
# Normalize whitespaces
__A : Union[str, Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
__A : Optional[int] = unicodedata.normalize("NFC" , lowerCamelCase )
return text
def lowerCAmelCase__ ( self , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
__A : List[Any] = self.preprocess_text(lowerCamelCase )
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(lowerCamelCase )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase ):
'''simple docstring'''
return out_string
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = []
__A : int = ""
__A : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase ) + token
__A : Optional[Any] = True
__A : str = []
else:
current_sub_tokens.append(lowerCamelCase )
__A : Union[str, Any] = False
out_string += self.sp_model.decode(lowerCamelCase )
return out_string
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Dict = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__A : Union[str, Any] = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , "wb" ) as fi:
__A : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
if isinstance(lowerCamelCase , lowerCamelCase ):
__A : Optional[Any] = self.preprocess_text(lowerCamelCase )
__A : Tuple = self.sp_model.encode(lowerCamelCase )
else:
__A : Any = [self.preprocess_text(lowerCamelCase ) for t in text]
__A : List[Any] = self.sp_model.encode(lowerCamelCase )
if return_tensors is True or return_tensors == "pt":
__A : Dict = torch.tensor(lowerCamelCase )
return token_ids
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
return self.sp_model.decode(lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
__A : Dict = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
__A : int = (
f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(lowerCamelCase ) + f"{self.bos_token}Bot:"
)
return self.encode(text=lowerCamelCase )
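# --- Hedged usage sketch for the tokenizer above. Assumes network access to the
# checkpoint named in PRETRAINED_VOCAB_FILES_MAP; the Swedish sample text is
# arbitrary and not a guaranteed round-trip.
from transformers import GPTSw3Tokenizer

tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
enc = tok("Träd är fina", return_tensors="pt")  # runs preprocess_text + SentencePiece
print(tok.decode(enc["input_ids"][0]))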
| 111
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
_UpperCamelCase = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
_UpperCamelCase = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = CamembertTokenizer
lowerCamelCase__ = CamembertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__A : List[Any] = CamembertTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = "<pad>"
__A : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCamelCase ) , 1004 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = CamembertTokenizer(lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
__A : Optional[int] = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__A : str = "I was born in 92000, and this is falsé."
__A : List[Any] = tokenizer.encode(lowerCamelCase )
__A : Tuple = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__A : Dict = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__A : str = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
        # <unk> tokens are not the same for `rust` as for `slow`.
        # This is because spm gives back the raw token instead of `unk` in EncodeAsPieces.
# tokens = tokenizer.tokenize(sequence)
__A : int = tokenizer.convert_ids_to_tokens(lowerCamelCase )
__A : Optional[int] = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__A : Tuple = self.get_tokenizer()
__A : List[Any] = self.get_rust_tokenizer()
__A : Any = "I was born in 92000, and this is falsé."
__A : int = tokenizer.tokenize(lowerCamelCase )
__A : Any = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__A : Any = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
__A : Optional[int] = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
__A : Union[str, Any] = self.get_rust_tokenizer()
__A : Optional[Any] = tokenizer.encode(lowerCamelCase )
__A : Dict = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        # fmt: off
__A : Any = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__A : int = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=lowerCamelCase , )
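# --- Hedged sketch of the slow-vs-fast parity check the tests above automate,
# written as a standalone snippet (assumes network access to "camembert-base").
from transformers import CamembertTokenizer, CamembertTokenizerFast

slow_tok = CamembertTokenizer.from_pretrained("camembert-base")
fast_tok = CamembertTokenizerFast.from_pretrained("camembert-base")
sample = "I was born in 92000, and this is falsé."
assert slow_tok.encode(sample) == fast_tok.encode(sample)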
| 111
| 1
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[int] = logging.get_logger()
@dataclass
class __magic_name__ :
lowercase : nn.Module
lowercase : List[nn.Module] =field(default_factory=A__ )
lowercase : list =field(default_factory=A__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tensor , UpperCamelCase__ : Tensor ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase__ , nn.Convad ) or isinstance(UpperCamelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : Tensor ) -> str:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase__ )
[x.remove() for x in self.handles]
return self
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return list(filter(lambda UpperCamelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __magic_name__ :
lowercase : nn.Module
lowercase : nn.Module
lowercase : int =1
lowercase : List =field(default_factory=A__ )
lowercase : List =field(default_factory=A__ )
lowercase : bool =True
def __call__( self : str , UpperCamelCase__ : Tensor ) -> str:
'''simple docstring'''
UpperCAmelCase = Tracker(self.dest )(UpperCamelCase__ ).parametrized
UpperCAmelCase = Tracker(self.src )(UpperCamelCase__ ).parametrized
UpperCAmelCase = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.src_skip , UpperCamelCase__ ) )
UpperCAmelCase = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.dest_skip , UpperCamelCase__ ) )
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ) and self.raise_if_mismatch:
raise Exception(
F'Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while'
F' destination module has {len(UpperCamelCase__ )}.' )
for dest_m, src_m in zip(UpperCamelCase__ , UpperCamelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(F'Transferred from={src_m} to={dest_m}' )
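# --- Hedged mini-demo of the hook-based tracing idea used by the Tracker /
# ModuleTransfer classes above: record leaf modules in execution order via
# forward hooks, then copy weights positionally between two architecturally
# identical models. Self-contained and illustrative only.
import torch
import torch.nn as nn

def trace_leaves(model: nn.Module, x: torch.Tensor):
    traced, handles = [], []
    def hook(m, inputs, output):
        if len(list(m.children())) == 0:  # leaf module
            traced.append(m)
    for m in model.modules():
        handles.append(m.register_forward_hook(hook))
    model(x)
    for h in handles:
        h.remove()
    # keep only parametrized leaves, like the `parametrized` property above
    return [m for m in traced if len(m.state_dict()) > 0]

src = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
dst = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
x = torch.randn(1, 4)
for d, s in zip(trace_leaves(dst, x), trace_leaves(src, x)):
    d.load_state_dict(s.state_dict())
assert torch.allclose(src(x), dst(x))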
class __magic_name__ ( nn.Module ):
def __init__( self : List[Any] , UpperCamelCase__ : nn.Module ) -> Tuple:
'''simple docstring'''
super().__init__()
UpperCAmelCase = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F'Unexpected layer name {k}'
UpperCAmelCase = len(UpperCamelCase__ ) + 1
feature_blocks.append((F'res{block_index}', v) )
UpperCAmelCase = nn.ModuleDict(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCamelCase__ : Tensor ) -> Tuple:
'''simple docstring'''
return get_trunk_forward_outputs(
UpperCamelCase__ , out_feat_keys=UpperCamelCase__ , feature_blocks=self._feature_blocks , )
class __magic_name__ ( A__ ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : str ) -> str:
'''simple docstring'''
UpperCAmelCase = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : List[Any] , UpperCamelCase__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
'''simple docstring'''
if x not in self:
UpperCAmelCase = self.convert_name_to_timm(UpperCamelCase__ )
UpperCAmelCase = partial(lambda: (timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval(), None) )
else:
UpperCAmelCase = super().__getitem__(UpperCamelCase__ )
return val
class __magic_name__ ( A__ ):
def __getitem__( self : Optional[int] , UpperCamelCase__ : str ) -> Callable[[], nn.Module]:
'''simple docstring'''
if "seer" in x and "in1k" not in x:
UpperCAmelCase = RegNetModel
else:
UpperCAmelCase = RegNetForImageClassification
return val
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> str:
for from_key, to_key in keys:
UpperCAmelCase = from_state_dict[from_key].clone()
print(F'Copied key={from_key} to={to_key}' )
return to_state_dict
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ) -> List[str]:
print(F'Converting {name}...' )
with torch.no_grad():
UpperCAmelCase , UpperCAmelCase = from_model_func()
UpperCAmelCase = our_model_func(lowerCamelCase_ ).eval()
UpperCAmelCase = ModuleTransfer(src=lowerCamelCase_ , dest=lowerCamelCase_ , raise_if_mismatch=lowerCamelCase_ )
UpperCAmelCase = torch.randn((1, 3, 224, 224) )
module_transfer(lowerCamelCase_ )
if from_state_dict is not None:
UpperCAmelCase = []
            # for seer models finetuned on in1k we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
UpperCAmelCase = manually_copy_vissl_head(lowerCamelCase_ , our_model.state_dict() , lowerCamelCase_ )
our_model.load_state_dict(lowerCamelCase_ )
UpperCAmelCase = our_model(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ )
UpperCAmelCase = (
our_outputs.logits if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else our_outputs.last_hidden_state
)
UpperCAmelCase = from_model(lowerCamelCase_ )
UpperCAmelCase = from_output[-1] if type(lowerCamelCase_ ) is list else from_output
        # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase = our_outputs.hidden_states[-1]
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=lowerCamelCase_ , )
UpperCAmelCase = 224 if "seer" not in name else 384
# we can use the convnext one
UpperCAmelCase = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=lowerCamelCase_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=lowerCamelCase_ , )
print(F'Pushed {name}' )
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = True ) -> Dict:
UpperCAmelCase = "imagenet-1k-id2label.json"
UpperCAmelCase = 1_000
UpperCAmelCase = (1, num_labels)
UpperCAmelCase = "huggingface/label-files"
UpperCAmelCase = num_labels
UpperCAmelCase = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type="dataset" ) ) , "r" ) )
UpperCAmelCase = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = partial(lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ )
UpperCAmelCase = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
UpperCAmelCase = NameToOurModelFuncMap()
UpperCAmelCase = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowerCamelCase_ , lowerCamelCase_ ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase_ , model_dir=str(lowerCamelCase_ ) , map_location="cpu" )
UpperCAmelCase = model_func()
# check if we have a head, if yes add it
UpperCAmelCase = files["classy_state_dict"]["base_model"]["model"]
UpperCAmelCase = model_state_dict["trunk"]
model.load_state_dict(lowerCamelCase_ )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase = partial(
lowerCamelCase_ , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowerCamelCase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowerCamelCase_ , lowerCamelCase_ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowerCamelCase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__lowerCamelCase : List[str] = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
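# --- Hedged sketch of the RegNet design-space width rule behind the
# RegNetParams (depth, w_0, w_a, w_m) used above, following Radosavovic et al.,
# "Designing Network Design Spaces". The quantum q=8 and the exact rounding are
# assumptions for illustration, not the library's implementation.
import numpy as np

def regnet_stage_widths(depth: int, w_0: float, w_a: float, w_m: float, q: int = 8):
    j = np.arange(depth)
    u = w_0 + w_a * j                                  # continuous widths per block
    s = np.round(np.log(u / w_0) / np.log(w_m))        # pick a power of w_m per block
    w = w_0 * np.power(w_m, s)                         # piecewise-constant widths
    w = (np.round(w / q) * q).astype(int)              # quantize to multiples of q
    widths, depths = np.unique(w, return_counts=True)  # group equal widths into stages
    return widths.tolist(), depths.tolist()

print(regnet_stage_widths(depth=27, w_0=1744, w_a=620.83, w_m=2.52))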
| 716
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __magic_name__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = "hf-internal-testing/tiny-random-t5"
UpperCAmelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
UpperCAmelCase = tokenizer("This is me" , return_tensors="pt" )
UpperCAmelCase = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase = model.generate(**UpperCamelCase__ )
UpperCAmelCase = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase = model_reloaded.generate(**UpperCamelCase__ )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = "hf-internal-testing/tiny-random-t5"
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
UpperCAmelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(UpperCamelCase__ ):
model.save_pretrained(UpperCamelCase__ )
UpperCAmelCase = model.reverse_bettertransformer()
model.save_pretrained(UpperCamelCase__ )
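# --- Hedged usage sketch of the BetterTransformer round-trip the tests above
# exercise (requires `optimum`; the tiny checkpoint name is taken from the test,
# and the output path is a placeholder).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
inputs = tokenizer("This is me", return_tensors="pt")
model = model.to_bettertransformer()       # swap in fused attention kernels
out = model.generate(**inputs)
model = model.reverse_bettertransformer()  # restore canonical weights before saving
model.save_pretrained("/tmp/t5-roundtrip")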
| 457
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : List[Any] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] ='yolos'
def __init__( self, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=[512, 864], lowerCAmelCase=16, lowerCAmelCase=3, lowerCAmelCase=True, lowerCAmelCase=100, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=0.1, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =image_size
lowerCamelCase_ =patch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =qkv_bias
lowerCamelCase_ =num_detection_tokens
lowerCamelCase_ =use_mid_position_embeddings
lowerCamelCase_ =auxiliary_loss
# Hungarian matcher
lowerCamelCase_ =class_cost
lowerCamelCase_ =bbox_cost
lowerCamelCase_ =giou_cost
# Loss coefficients
lowerCamelCase_ =bbox_loss_coefficient
lowerCamelCase_ =giou_loss_coefficient
lowerCamelCase_ =eos_coefficient
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =version.parse('1.11' )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 12
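# --- Hedged sketch: instantiating the config above through the public
# transformers API (assumes the released YolosConfig matches the defaults
# shown here).
from transformers import YolosConfig

config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
print(config.hidden_size, config.num_detection_tokens)  # expected: 768 100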
| 676
|
'''simple docstring'''
from maths.prime_check import is_prime
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
if not isinstance(__snake_case , __snake_case ):
lowerCamelCase_ =F'''Input value of [number={number}] must be an integer'''
raise TypeError(__snake_case )
if is_prime(__snake_case ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
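# --- Hedged, self-contained variant of the twin-prime check above, with a
# simple trial-division primality test so it runs without `maths.prime_check`.
def _is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))

def twin_prime(number: int) -> int:
    # Returns number + 2 when (number, number + 2) is a twin-prime pair, else -1.
    if _is_prime(number) and _is_prime(number + 2):
        return number + 2
    return -1

assert twin_prime(3) == 5
assert twin_prime(4) == -1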
| 676
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =LongformerTokenizer
snake_case_ =True
snake_case_ =LongformerTokenizerFast
snake_case_ =True
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCAmelCase__ : Optional[Any] = dict(zip(__lowerCamelCase ,range(len(__lowerCamelCase ) ) ) )
lowerCAmelCase__ : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCAmelCase__ : Optional[int] = {'''unk_token''': '''<unk>'''}
lowerCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + '''\n''' )
with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCamelCase ) )
def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : List[str] = '''lower newer'''
lowerCAmelCase__ : Tuple = '''lower newer'''
return input_text, output_text
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase__ : Union[str, Any] = '''lower newer'''
lowerCAmelCase__ : Optional[int] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCAmelCase__ : Optional[int] = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : str = tokens + [tokenizer.unk_token]
lowerCAmelCase__ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' ,add_special_tokens=__lowerCamelCase ) ,[0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' ,add_special_tokens=__lowerCamelCase ) ,[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] ,)
@slow
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Any = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
lowerCAmelCase__ : List[Any] = tokenizer.encode('''sequence builders''' ,add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ : int = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(
'''sequence builders''' ,add_special_tokens=__lowerCamelCase ,add_prefix_space=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(
'''sequence builders''' ,'''multi-sequence build''' ,add_special_tokens=__lowerCamelCase ,add_prefix_space=__lowerCamelCase )
lowerCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ,__lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.get_tokenizer()
lowerCAmelCase__ : List[str] = '''Encode this sequence.'''
lowerCAmelCase__ : Dict = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowerCAmelCase__ : Dict = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,add_prefix_space=__lowerCamelCase )
lowerCAmelCase__ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,add_prefix_space=__lowerCamelCase )
lowerCAmelCase__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCAmelCase__ : str = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase ,__lowerCamelCase )
# Testing spaces after special tokens
lowerCAmelCase__ : Any = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase )} ) # mask token has a left space
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = '''Encode <mask> sequence'''
lowerCAmelCase__ : Dict = '''Encode <mask>sequence'''
lowerCAmelCase__ : List[Any] = tokenizer.encode(__lowerCamelCase )
lowerCAmelCase__ : int = encoded.index(__lowerCamelCase )
lowerCAmelCase__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[str] = tokenizer.encode(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = encoded.index(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase ,**__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained(__lowerCamelCase ,**__lowerCamelCase )
lowerCAmelCase__ : Any = '''A, <mask> AllenNLP sentence.'''
lowerCAmelCase__ : str = tokenizer_r.encode_plus(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase )
lowerCAmelCase__ : str = tokenizer_p.encode_plus(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) ,sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,)
lowerCAmelCase__ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCAmelCase__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__lowerCamelCase ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
lowerCAmelCase__ : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] ,__lowerCamelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] ,__lowerCamelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase__ : str = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase__ : List[str] = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase__ : List[str] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : Dict = tokenizer_r(__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) ,)
lowerCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : Any = tokenizer_r(__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) ,)
lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = tokenizer_r(__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) ,)
lowerCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer_r(__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) ,)
lowerCAmelCase__ : Optional[int] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer_r(__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) ,)
lowerCAmelCase__ : Dict = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : int = tokenizer_r(__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) ,)
lowerCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase ,use_fast=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ,trim_offsets=__lowerCamelCase )
lowerCAmelCase__ : List[str] = tokenizer_r(__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) ,)
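# --- Hedged sketch of the offset-mapping behavior probed above: with a fast
# tokenizer, `trim_offsets` controls whether a token's character span includes
# its leading space (the checkpoint name is an assumption; network access needed).
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained(
    "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc["offset_mapping"])  # e.g. [(0, 5), (6, 11)] with trimmed offsets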
| 702
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =AutoencoderKL
snake_case_ ="""sample"""
snake_case_ =1e-2
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : List[Any] = (32, 32)
lowerCAmelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
return {"sample": image}
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : int = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowerCAmelCase__ : str = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''' ,'''Gradient checkpointing skipped on MPS''' )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : Tuple = self.model_class(**__lowerCamelCase )
model.to(__lowerCamelCase )
assert not model.is_gradient_checkpointing and model.training
lowerCAmelCase__ : Tuple = model(**__lowerCamelCase ).sample
        # run the backwards pass on the model; for simplicity we don't compute a
        # real loss and instead backprop on (out - labels).mean()
model.zero_grad()
lowerCAmelCase__ : Optional[Any] = torch.randn_like(__lowerCamelCase )
lowerCAmelCase__ : Tuple = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowerCAmelCase__ : str = self.model_class(**__lowerCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__lowerCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowerCAmelCase__ : Dict = model_a(**__lowerCamelCase ).sample
        # run the backwards pass on the model; for simplicity we don't compute a
        # real loss and instead backprop on (out_a - labels).mean()
model_a.zero_grad()
lowerCAmelCase__ : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowerCAmelCase__ : List[str] = dict(model.named_parameters() )
lowerCAmelCase__ : Optional[Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
lowerCAmelCase__ : Tuple = model.to(__lowerCamelCase )
model.eval()
if torch_device == "mps":
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
else:
lowerCAmelCase__ : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : Dict = image.to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__lowerCamelCase ,sample_posterior=__lowerCamelCase ,generator=__lowerCamelCase ).sample
lowerCAmelCase__ : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowerCAmelCase__ : List[str] = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
lowerCAmelCase__ : str = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
lowerCAmelCase__ : Any = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
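# --- Hedged sketch: building the tiny AutoencoderKL that the unit tests above
# configure and running one stochastic forward pass (shapes follow init_dict;
# illustrative only).
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
sample = torch.randn(4, 3, 32, 32)
generator = torch.manual_seed(0)
with torch.no_grad():
    out = vae(sample, sample_posterior=True, generator=generator).sample
assert out.shape == sample.shape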
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        # "mps" does not support device-bound generators, so fall back to the global seed there
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        # xformers attention and PyTorch 2.0 SDPA should agree within fp16 tolerance
        assert torch_all_close(sample, sample_a, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_a, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        # the SD VAE downsamples spatially by a factor of 8 and produces 4 latent channels
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 90
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """
    Stochastic sampling from Karras et al. [1], applied to variance-expanding (VE) models [2].

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    [2] Song, Yang, et al. "Score-Based Generative Modeling through Stochastic Differential Equations."
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
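# Minimal usage sketch (illustrative, not part of this module): assumes a hub
# checkpoint whose UNet2DModel weights are compatible with KarrasVeScheduler.
#
#     from diffusers import KarrasVeScheduler, UNet2DModel
#
#     unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")  # hypothetical checkpoint choice
#     pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]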
| 506
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in the variable ``x``)
    starting from the point ``a``, using the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (i.e. the value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find the value of e (root of log(x) - 1 = 0)
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
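# Note (illustrative): Newton-Raphson iterates x_{n+1} = x_n - f(x_n) / f'(x_n),
# so which root it converges to (and whether it converges at all) depends on the
# starting point `a`. Also, eval() executes arbitrary expressions, so only pass
# trusted function strings (hence the `noqa: S307` markers above).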
| 506
| 1
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sort a list of non-negative integers in place, one digit at a time.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
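# Complexity note (illustrative): with n keys and maximum value K, the outer loop
# runs O(log_RADIX K) passes of O(n + RADIX) work each, i.e.
# O((n + RADIX) * log_RADIX K) overall; this implementation assumes non-negative
# integers, since digits are extracted with (i / placement) % RADIX.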
| 713
|
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def _lowercase ( _SCREAMING_SNAKE_CASE : str ) -> str:
'''simple docstring'''
if not sentence:
return ""
__A : Optional[Any] = dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 237
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1),
                size=(image.size[1], image.size[0]),
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
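# Example invocation (illustrative; the script filename is assumed, and the
# checkpoint URL below is the parser's default):
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-converted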
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 492
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
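    # The _LazyModule registered above defers the heavy framework imports
    # (torch/tf/flax) until an attribute is actually accessed, which keeps
    # `import transformers` fast even with all three backends installed.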
| 492
| 1
|
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 86: return the least value of M such that the number of
    cuboids with integer dimensions up to M x M x M, whose shortest surface
    path between opposite corners has integer length, first exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # count cuboids (a, b, max_cuboid_size) with a + b == sum_shortest_sides
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
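# Counting note (illustrative): for a cuboid a <= b <= c, the shortest surface
# path has length sqrt((a + b)**2 + c**2), so for each candidate longest side c
# (max_cuboid_size) it suffices to test every value of a + b from 2 to 2*c and
# count how many valid (a, b) splits of that sum exist.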
if __name__ == "__main__":
print(f'''{solution() = }''')
| 411
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 411
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 5_12,
"squeezebert/squeezebert-mnli": 5_12,
"squeezebert/squeezebert-mnli-headless": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
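# Illustrative usage sketch:
#
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     encoded = tokenizer("Hello world", return_token_type_ids=True)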
| 542
|
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents);
    documents in `corpus` are separated by newlines."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = round(log10(n / df), 3); with smoothing, round(1 + log10(n / (1 + df)), 3)."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Return the tf-idf score, rounded to 3 decimal places."""
    return round(tf * idf, 3)
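# Illustrative usage sketch, combining the pieces above (documents in the corpus
# are newline-separated):
#
#     corpus = "the cat sat\nthe dog ran\nthe cat ran"
#     tf = term_frequency("cat", "the cat sat")   # -> 1
#     df, n = document_frequency("cat", corpus)   # -> (2, 3)
#     score = tf_idf(tf, inverse_document_frequency(df, n))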
| 542
| 1
|
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    """
    A thin wrapper around a learning rate scheduler that only steps when the
    wrapped optimizer(s) actually stepped (for instance, steps are skipped
    under mixed precision when gradients overflow).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
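# Illustrative usage (normally `Accelerator.prepare` constructs this wrapper for you):
#
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizers=optimizer)
#     ...training step...
#     scheduler.step()  # no-op while gradient sync is deferred or the optimizer step was skipped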
| 702
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of 3-channel PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
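# Note: the processor is meant to be a thin wrapper, so each test above checks
# that CLIPProcessor delegates to the underlying tokenizer / image processor
# without altering their outputs.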
| 273
| 0
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 34
|
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Set a given tensor (parameter or buffer) of a module on a specific device;
    plain `param.to(device)` is not supported for bitsandbytes quantized parameters.
    """
    # recurse down to the submodule that actually owns the tensor
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """
    Recursively replace `nn.Linear`/`Conv1D` modules with their bitsandbytes
    quantized equivalents; returns the model and whether anything was replaced.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Converts a model's linear layers to bitsandbytes quantized layers, keeping `lm_head` in full precision."""
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use"
        " `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
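# Heuristic below: keep modules with tied weights (e.g. an `lm_head` tied to the input
# embeddings) and the last module of the model in full precision, since quantizing them
# tends to hurt generation quality and breaks weight tying.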
def get_keys_to_not_convert(model):
    """Returns the module names that should be kept in full precision (tied weights and the output head)."""
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 34
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : List[Any] , snake_case_ : str , snake_case_ : List[str]=13 , snake_case_ : List[str]=7 , snake_case_ : Optional[Any]=True , snake_case_ : Any=False , snake_case_ : List[Any]=99 , snake_case_ : Union[str, Any]=32 , snake_case_ : Tuple=2 , snake_case_ : str=4 , snake_case_ : List[Any]=37 , snake_case_ : Dict=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=20 , snake_case_ : List[str]=2 , snake_case_ : Tuple=1 , snake_case_ : str=0 , )-> List[Any]:
__lowerCAmelCase =parent
__lowerCAmelCase =batch_size
__lowerCAmelCase =seq_length
__lowerCAmelCase =is_training
__lowerCAmelCase =use_labels
__lowerCAmelCase =vocab_size
__lowerCAmelCase =hidden_size
__lowerCAmelCase =num_hidden_layers
__lowerCAmelCase =num_attention_heads
__lowerCAmelCase =intermediate_size
__lowerCAmelCase =hidden_dropout_prob
__lowerCAmelCase =attention_probs_dropout_prob
__lowerCAmelCase =max_position_embeddings
__lowerCAmelCase =eos_token_id
__lowerCAmelCase =pad_token_id
__lowerCAmelCase =bos_token_id
def UpperCamelCase ( self : Union[str, Any])-> List[Any]:
__lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
__lowerCAmelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
__lowerCAmelCase =tf.concat([input_ids, eos_tensor] , axis=1)
__lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowerCAmelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowerCAmelCase =prepare_blenderbot_inputs_dict(snake_case_ , snake_case_ , snake_case_)
return config, inputs_dict
def UpperCamelCase ( self : Optional[int] , snake_case_ : str , snake_case_ : Dict)-> Any:
__lowerCAmelCase =TFBlenderbotModel(config=snake_case_).get_decoder()
__lowerCAmelCase =inputs_dict["""input_ids"""]
__lowerCAmelCase =input_ids[:1, :]
__lowerCAmelCase =inputs_dict["""attention_mask"""][:1, :]
__lowerCAmelCase =inputs_dict["""head_mask"""]
__lowerCAmelCase =1
# first forward pass
__lowerCAmelCase =model(snake_case_ , attention_mask=snake_case_ , head_mask=snake_case_ , use_cache=snake_case_)
__lowerCAmelCase , __lowerCAmelCase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase =ids_tensor((self.batch_size, 3) , config.vocab_size)
__lowerCAmelCase =tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
# append to next input_ids and
__lowerCAmelCase =tf.concat([input_ids, next_tokens] , axis=-1)
__lowerCAmelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1)
__lowerCAmelCase =model(snake_case_ , attention_mask=snake_case_)[0]
__lowerCAmelCase =model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
__lowerCAmelCase =int(ids_tensor((1,) , output_from_past.shape[-1]))
__lowerCAmelCase =output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1e-3)
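# Helper below fills in default attention/head masks for Blenderbot when the caller does not
# provide them; padding positions are masked out based on `config.pad_token_id`.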
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
def UpperCamelCase ( self : List[Any])-> Union[str, Any]:
__lowerCAmelCase =TFBlenderbotModelTester(self)
__lowerCAmelCase =ConfigTester(self , config_class=snake_case_)
def UpperCamelCase ( self : Any)-> List[str]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self : int)-> List[str]:
__lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
@cached_property
def UpperCamelCase ( self : List[str])-> Optional[int]:
return BlenderbotTokenizer.from_pretrained(self.model_name)
@cached_property
def UpperCamelCase ( self : Optional[Any])-> Optional[Any]:
__lowerCAmelCase =TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
@slow
def UpperCamelCase ( self : str)-> Any:
__lowerCAmelCase =self.tokenizer(self.src_text , return_tensors="""tf""")
__lowerCAmelCase =self.model.generate(
model_inputs.input_ids , )
__lowerCAmelCase =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case_)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 456
|
def odd_even_sort(input_list: list) -> list:
    """Sorts a list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 456
| 1
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
"""simple docstring"""
def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=6_4 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> str:
"""simple docstring"""
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = scope
_A = vocab_size - 1
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = random_attention_mask([self.batch_size, self.seq_length] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_A , _A , _A , _A = self.prepare_config_and_inputs()
_A = True
return config, input_ids, input_mask, token_labels
def lowercase_ ( self , a , a , a ) -> List[Any]:
"""simple docstring"""
_A = GPTNeoXModel(config=a )
model.to(a )
model.eval()
_A = model(a , attention_mask=a )
_A = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , a , a , a ) -> Optional[Any]:
"""simple docstring"""
_A = True
_A = GPTNeoXModel(a )
model.to(a )
model.eval()
_A = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , a , a , a , a ) -> Optional[Any]:
"""simple docstring"""
_A = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
_A = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , a , a , a , a ) -> Union[str, Any]:
"""simple docstring"""
_A = self.num_labels
_A = GPTNeoXForQuestionAnswering(a )
model.to(a )
model.eval()
_A = model(a , attention_mask=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , a , a , a , a ) -> List[Any]:
"""simple docstring"""
_A = self.num_labels
_A = GPTNeoXForSequenceClassification(a )
model.to(a )
model.eval()
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , a , a , a , a ) -> Any:
"""simple docstring"""
_A = self.num_labels
_A = GPTNeoXForTokenClassification(a )
model.to(a )
model.eval()
_A = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , a , a , a ) -> int:
"""simple docstring"""
_A = True
_A = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
_A = model(a , attention_mask=a , use_cache=a )
_A = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_A = torch.cat([input_ids, next_tokens] , dim=-1 )
_A = torch.cat([input_mask, next_mask] , dim=-1 )
_A = model(a , attention_mask=a , output_hidden_states=a )
_A = output_from_no_past['''hidden_states'''][0]
_A = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['''hidden_states'''][0]
# select random slice
_A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A = output_from_no_past[:, -3:, random_slice_idx].detach()
_A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_A = GPTNeoXModelTester(self )
_A = ConfigTester(self , config_class=a , hidden_size=6_4 , num_attention_heads=8 )
def lowercase_ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs_for_decoder()
_A = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_A , _A , _A , _A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def lowercase_ ( self ) -> str:
"""simple docstring"""
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def lowercase_ ( self ) -> str:
"""simple docstring"""
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def lowercase_ ( self ) -> str:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def lowercase_ ( self , a ) -> Optional[Any]:
"""simple docstring"""
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = ids_tensor([1, 1_0] , config.vocab_size )
_A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_A = GPTNeoXModel(a )
original_model.to(a )
original_model.eval()
_A = original_model(a ).last_hidden_state
_A = original_model(a ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
_A = {'''type''': scaling_type, '''factor''': 10.0}
_A = GPTNeoXModel(a )
scaled_model.to(a )
scaled_model.eval()
_A = scaled_model(a ).last_hidden_state
_A = scaled_model(a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a , a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(a , a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a , a , atol=1e-5 ) )
@require_torch
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> str:
"""simple docstring"""
_A = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
for checkpointing in [True, False]:
_A = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(a )
_A = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(a )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
_A = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
_A = model.generate(**a , do_sample=a , max_new_tokens=2_0 )
_A = tokenizer.batch_decode(a )[0]
self.assertEqual(a , a )
| 317
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
)
_A = '''A painting of a squirrel eating a burger'''
_A = jax.device_count()
_A = num_samples * [prompt]
_A = sd_pipe.prepare_inputs(a )
_A = replicate(a )
_A = shard(a )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(a , jax.device_count() )
_A = sd_pipe(a , a , a , num_inference_steps=2_5 , jit=a )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
model_id = "stabilityai/stable-diffusion-2"
scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
    model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16
)
params["scheduler"] = scheduler_params
_A = '''A painting of a squirrel eating a burger'''
_A = jax.device_count()
_A = num_samples * [prompt]
_A = sd_pipe.prepare_inputs(a )
_A = replicate(a )
_A = shard(a )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(a , jax.device_count() )
_A = sd_pipe(a , a , a , num_inference_steps=2_5 , jit=a )[0]
assert images.shape == (jax.device_count(), 1, 7_6_8, 7_6_8, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 317
| 1
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
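# The helpers below are Lightning callbacks for the RAG / seq2seq fine-tuning scripts:
# checkpointing on a validation metric, early stopping, and a logging callback that writes
# metrics and generations to disk.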
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
_snake_case = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
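# Callback below logs learning rates per parameter group and, on test end, writes the
# aggregated metrics (and optionally the generated texts) to files in the output directory.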
class _a ( pl.Callback ):
def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : pl.Trainer , SCREAMING_SNAKE_CASE__ : pl.LightningModule , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int=True ):
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
lowerCamelCase__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
lowerCamelCase__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCamelCase__ = od / 'test_results.txt'
lowerCamelCase__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCamelCase__ = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
lowerCamelCase__ = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , 'a+' ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE__ ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCamelCase__ = metrics[key]
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
lowerCamelCase__ = val.item()
lowerCamelCase__ = F'{key}: {val:.6f}\n'
writer.write(SCREAMING_SNAKE_CASE__ )
if not save_generations:
return
if "preds" in metrics:
lowerCamelCase__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
try:
lowerCamelCase__ = pl_module.model.model.num_parameters()
except AttributeError:
lowerCamelCase__ = pl_module.model.num_parameters()
lowerCamelCase__ = count_trainable_parameters(SCREAMING_SNAKE_CASE__ )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6} )
@rank_zero_only
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : pl.Trainer , SCREAMING_SNAKE_CASE__ : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'test' )
@rank_zero_only
def _UpperCamelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : pl.Trainer , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 659
|
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_snake_case = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=1 ):
lowerCamelCase__ = tokenizer
lowerCamelCase__ = dataset
lowerCamelCase__ = len(SCREAMING_SNAKE_CASE__ ) if n_tasks is None else n_tasks
lowerCamelCase__ = n_copies
def __iter__( self : Any ):
lowerCamelCase__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
lowerCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = start_length
lowerCamelCase__ = eof_strings
lowerCamelCase__ = tokenizer
def __call__( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
lowerCamelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(SCREAMING_SNAKE_CASE__ )
def remove_last_block(string):
    """Removes the last block of the generated code, i.e. everything from the final EOF string on."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generates multiple code completions for each task in the dataset, distributed across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
for step, batch in tqdm(enumerate(_a ) ):
with torch.no_grad():
lowerCamelCase__ = batch['ids'].shape[-1]
lowerCamelCase__ = accelerator.unwrap_model(_a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_a , **_a )
# each task is generated batch_size times
lowerCamelCase__ = batch['task_id'].repeat(_a )
lowerCamelCase__ = accelerator.pad_across_processes(
_a , dim=1 , pad_index=tokenizer.pad_token_id )
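# Generated sequences can differ in length across processes; padding them to a common length
# (above) lets `accelerator.gather` below concatenate tensors from all processes.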
lowerCamelCase__ , lowerCamelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase__ = generated_tokens.cpu().numpy()
lowerCamelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_a , _a ):
gen_token_dict[task].append(_a )
lowerCamelCase__ = [[] for _ in range(_a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase__ = tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
code_gens[task].append(remove_last_block(_a ) )
return code_gens
def main():
'''simple docstring'''
lowerCamelCase__ = HfArgumentParser(_a )
lowerCamelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase__ = 'false'
if args.num_workers is None:
lowerCamelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase__ = Accelerator()
set_seed(args.seed , device_specific=_a )
# Load model and tokenizer
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase__ = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _a , _a )] ),
}
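# The stopping criterion ends generation for a batch only once every sequence has produced
# one of the EOF strings, so the model does not keep generating past the end of a function.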
# Load evaluation dataset and metric
lowerCamelCase__ = load_dataset('openai_humaneval' )
lowerCamelCase__ = load_metric('code_eval' )
lowerCamelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
lowerCamelCase__ = args.n_samples // args.batch_size
lowerCamelCase__ = TokenizedDataset(_a , human_eval['test'] , n_copies=_a , n_tasks=_a )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase__ = DataLoader(_a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase__ = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(_a , _a )
lowerCamelCase__ = complete_code(
_a , _a , _a , _a , n_tasks=_a , batch_size=args.batch_size , **_a , )
if accelerator.is_main_process:
lowerCamelCase__ = []
for task in tqdm(range(_a ) ):
lowerCamelCase__ = human_eval['test'][task]['test']
lowerCamelCase__ = F'check({human_eval["test"][task]["entry_point"]})'
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase__ , lowerCamelCase__ = code_eval_metric.compute(
references=_a , predictions=_a , num_workers=args.num_workers )
print(F'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_a , _a )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 659
| 1
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase_ :
_a : Dict = None
def __a ( self : List[str] ):
lowerCamelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __UpperCamelCase )
def __a ( self : Union[str, Any] ):
lowerCamelCase_ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : List[Any] = os.path.join(__UpperCamelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(__UpperCamelCase )
lowerCamelCase_ : Dict = self.feature_extraction_class.from_json_file(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self : Tuple ):
lowerCamelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : Optional[Any] = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
lowerCamelCase_ : Optional[int] = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self : Tuple ):
lowerCamelCase_ : List[Any] = self.feature_extraction_class()
self.assertIsNotNone(__UpperCamelCase )
| 364
|
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowerCamelCase :List[Any] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase ( __snake_case ):
a: bool = field(default=__snake_case , metadata={"help": "Whether to use SortishSampler or not."} )
a: bool = field(
default=__snake_case , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
a: Optional[int] = field(
default=__snake_case , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
a: Optional[int] = field(
default=__snake_case , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
a: Optional[Union[str, Path, GenerationConfig]] = field(
default=__snake_case , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 487
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = """ctrl"""
_SCREAMING_SNAKE_CASE = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase=246_534 , lowercase=256 , lowercase=1_280 , lowercase=8_192 , lowercase=48 , lowercase=16 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-6 , lowercase=0.02 , lowercase=True , **lowercase , ) -> List[str]:
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = dff
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = use_cache
super().__init__(**lowercase )
| 721
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Applies the rectified linear unit elementwise: max(0, x)."""
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 393
| 0
|
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 520
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
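# Helper below picks a representative linear layer (gpt2's first MLP projection, otherwise
# bloom's `dense_4h_to_h`) whose weight class the quantization tests inspect.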
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""")
    EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""")
    EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""")
    MAX_NEW_TOKENS = 10
def __UpperCAmelCase( self ):
# Models and tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest(Base4bitTest):
def __UpperCAmelCase( self ):
super().setUp()
# Models and tokenizer
self.model_fpaa = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.float16 , device_map="auto" )
self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
def __UpperCAmelCase( self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase( self ):
config = self.model_abit.config
self.assertTrue(hasattr(config , "quantization_config" ) )
__A : List[str] = config.to_dict()
__A : List[Any] = config.to_diff_dict()
__A : str = config.to_json_string()
def __UpperCAmelCase( self ):
from bitsandbytes.nn import Params4bit
__A : Optional[Any] = self.model_fpaa.get_memory_footprint()
__A : str = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__A : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Params4bit )
def __UpperCAmelCase( self ):
from transformers import T5PreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uint8 )
def __UpperCAmelCase( self ):
__A : Dict = self.tokenizer(self.input_text , return_tensors="pt" )
__A : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase( self ):
__A : Optional[Any] = BitsAndBytesConfig()
__A : Tuple = True
__A : Optional[int] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , device_map="auto" )
__A : Any = self.tokenizer(self.input_text , return_tensors="pt" )
__A : List[Any] = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def __UpperCAmelCase( self ):
with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(tmpdirname )
def __UpperCAmelCase( self ):
__A : str = BitsAndBytesConfig()
with self.assertRaises(__UpperCAmelCase ):
__A : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map="auto" , bnb_abit_quant_type="nf4" , )
def __UpperCAmelCase( self ):
with self.assertRaises(__UpperCAmelCase ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__A : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" )
self.model_fpaa = self.model_fpaa.to(torch.float32 )
__A : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__A : Tuple = self.model_fpaa.to("cpu" )
# Check this does not throw an error
__A : int = self.model_fpaa.half()
# Check this does not throw an error
__A : Tuple = self.model_fpaa.float()
def __UpperCAmelCase( self ):
__A : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=__UpperCAmelCase , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
    cls.model_name = "t5-small"
    cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
    cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
    cls.input_text = "Translate in German: Hello, my dog is cute"
def __UpperCAmelCase( self ):
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase( self ):
from transformers import T5ForConditionalGeneration

modules = T5ForConditionalGeneration._keep_in_fp32_modules
T5ForConditionalGeneration._keep_in_fp32_modules = None

# test with `t5-small`
model = T5ForConditionalGeneration.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_ = model.generate(**encoded_input )

# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
    self.dense_act_model_name , load_in_4bit=True , device_map="auto" )
encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_ = model.generate(**encoded_input )
T5ForConditionalGeneration._keep_in_fp32_modules = modules
def __UpperCAmelCase( self ):
import bitsandbytes as bnb

from transformers import T5ForConditionalGeneration

# test with `t5-small`
model = T5ForConditionalGeneration.from_pretrained(self.model_name , load_in_4bit=True , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linear4bit ) )
encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_ = model.generate(**encoded_input )

# test with `flan-t5-small`
model = T5ForConditionalGeneration.from_pretrained(
    self.dense_act_model_name , load_in_4bit=True , device_map="auto" )
encoded_input = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_ = model.generate(**encoded_input )
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
def __UpperCAmelCase( self ):
super().setUp()
# model_name
__A : Any = "bigscience/bloom-560m"
__A : Tuple = "t5-small"
# Different types of model
__A : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
# Sequence classification model
__A : Any = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
# CausalLM model
__A : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
# Seq2seq model
__A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map="auto" )
def __UpperCAmelCase( self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase( self ):
from bitsandbytes.nn import Params4bit
self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
def __UpperCAmelCase( self ):
super().setUp()
def __UpperCAmelCase( self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase( self ):
self.pipe = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
pipeline_output = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
def __UpperCAmelCase( self ):
super().setUp()
def __UpperCAmelCase( self ):
model_parallel = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_4bit=True , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__A : str = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
__A : Dict = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
def __UpperCAmelCase( self ):
__A : List[str] = "facebook/opt-350m"
super().setUp()
def __UpperCAmelCase( self ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
param.requires_grad = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
param.data = param.data.to(torch.float32 )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__UpperCAmelCase ) ):
module.q_proj = LoRALayer(module.q_proj , rank=16 )
module.k_proj = LoRALayer(module.k_proj , rank=16 )
module.v_proj = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__A : int = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
out = model.forward(**batch )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _a ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """gpt2-xl"""
lowerCamelCase_ : Dict = 3.3191_8548_5415_2187
| 520
| 1
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ : Tuple ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowercase_ , [128, 64, 32, 16, 8] )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
UpperCAmelCase : Tuple = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ : List[Any] , lowercase_ : Dict ):
nonlocal batch_sizes
batch_sizes.append(lowercase_ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arg1
bs , arg1 = mock_training_loop_function('hello' )
self.assertListEqual(batch_sizes , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arg1] , [8, 'hello'] )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowercase_ : List[str] ):
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : Any ) -> Any:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ : Any ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : Dict ) -> int:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowercase_ : int , lowercase_ : int , lowercase_ : List[str] ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowercase_ : List[str] ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(lowercase_ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
UpperCAmelCase : Tuple = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowercase_ )
UpperCAmelCase : List[str] = release_memory(lowercase_ )
self.assertEqual(torch.cuda.memory_allocated() , lowercase_ )
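# Usage sketch (hedged: `train`, `model`, and `dataloader` are hypothetical
# names; only `find_executable_batch_size` comes from accelerate). The
# decorator injects `batch_size` as the first argument and halves it after
# every out-of-memory error, which is exactly the [128, 64, 32, 16, 8]
# sequence asserted above.
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, dataloader):
#       ...  # raise RuntimeError("CUDA out of memory.") to trigger a retry
#
#   train(model, dataloader)  # batch_size is supplied by the decorator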
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vqvae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
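# Aside (illustrative, not part of the test file): the repeated
# torch.manual_seed(0) calls above matter because the pipeline consumes the
# generator state while sampling; re-seeding gives both runs an identical
# noise sequence, which is what makes the slice comparisons meaningful.
#
#   generator = torch.manual_seed(0)  # returns the re-seeded global Generator
#   noise_a = torch.randn(2, 2, generator=generator)
#   generator = torch.manual_seed(0)
#   noise_b = torch.randn(2, 2, generator=generator)
#   assert torch.equal(noise_a, noise_b)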
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
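# Usage sketch (the prompt text and default below are hypothetical; only the
# helper itself is defined above). `_ask_field` keeps re-prompting while
# `convert_value` raises, printing `error_message` each time:
#
#   num_processes = _ask_field(
#       "How many processes should be launched? [1]: ",
#       convert_value=int,
#       default=1,
#       error_message="Please enter an integer.",
#   )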
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
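# Quick sanity check (illustrative, not part of the original script): a
# vertical "blinker" in the middle of a 5x5 canvas flips to a horizontal one
# after a single generation. Edge cells are unreliable here because the
# r - 1 / c - 1 slices come up empty for r == 0 or c == 0, so the demo stays
# in the interior of the board.
#
#   board = create_canvas(5)
#   board[1][2] = board[2][2] = board[3][2] = True
#   board = run(board)
#   assert [board[2][1], board[2][2], board[2][3]] == [True, True, True]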
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(
            text,
        )
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"

        ids = tokenizer.encode(
            text,
        )
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(
            text,
        )
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
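# Aside (not part of the test suite): "\u0120" (Ġ) in the toy vocab above is
# how GPT-2's byte-level BPE marks a leading space. The tokenizer maps bytes
# that are not printable into a shifted unicode range; for low bytes such as
# the space (0x20) this amounts to adding 0x100:
#
#   assert chr(0x20 + 0x100) == "\u0120"  # 'Ġ'
#
# which is why "lower newer" tokenizes to ["\u0120low", "er", "\u0120", ...].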
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with the Erdős–Rényi G(n, p) model."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
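# Usage sketch (illustrative, not from the original module): with
# probability >= 1 the generator short-circuits to complete_graph, so this
# output is deterministic regardless of the seed:
#
#   >>> sorted(random_graph(3, 1.0).items())
#   [(0, [1, 2]), (1, [0, 2]), (2, [0, 1])]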
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    r"""
    Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor and a Wav2Vec2 CTC tokenizer into a
    single processor.
    """

    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
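# Usage sketch (hedged: the checkpoint name is just a common example, and
# `raw_audio` is a placeholder for a 1D float array sampled at 16 kHz):
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")
#   both = processor(audio=raw_audio, sampling_rate=16_000, text="a transcript")
#   # both["labels"] holds the tokenized transcript ids, as wired up in
#   # __call__ above.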
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 16_00, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 16_00, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
])
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
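# Aside (illustrative, not part of the transformers file): _LazyModule defers
# the heavy torch/tf/flax imports until an attribute is first accessed. A
# minimal sketch of the same idea:
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, name):
#           module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           return getattr(module, name)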
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
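# Standalone sketch of the call convention used throughout this suite (toy
# values, not taken from the tests above; the warpers ignore input_ids, so
# None is acceptable there):
#
#   warp = FlaxTopKLogitsWarper(top_k=2)
#   logits = jnp.array([[0.0, 1.0, 2.0, 3.0]])
#   out = warp(None, logits, cur_len=1)
#   # everything but the two largest logits is now -inf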
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _a ( lowerCamelCase_ = "isbn/0140328726" ):
snake_case : str =olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
snake_case : int =F'''{olid} is not a valid Open Library olid'''
raise ValueError(lowerCamelCase_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(f"\nSearching Open Library for ISBN: {isbn}...\n")
try:
book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f"Sorry, there are no results for ISBN: {isbn}.")
| 136
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
| 136
| 1
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
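# Hedged CLI sketch (the script filename and paths are assumptions;
# the flags mirror the argparse definitions above):
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path ./xlm-roberta-xl \
#       --classification_head   # only when converting an MNLI head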
| 175
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mask2former"] = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mask2former import (
    MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    Mask2FormerForUniversalSegmentation,
    Mask2FormerModel,
    Mask2FormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
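# Hedged usage note: with the lazy structure above, a plain
#   from transformers.models.mask2former import Mask2FormerConfig
# resolves through _LazyModule, so configuration_mask2former is only
# imported on first attribute access (import path assumed).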
| 483
| 0
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        """simple docstring"""
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        """simple docstring"""
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        """simple docstring"""
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """simple docstring"""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        """simple docstring"""
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []
    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        """simple docstring"""
        with open(path, "rb") as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        """simple docstring"""
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        """simple docstring"""
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, output_path):
        """simple docstring"""
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))
        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)
        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)
        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        """simple docstring"""
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile
        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr
        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """simple docstring"""
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame
        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    #  Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        """simple docstring"""
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        """simple docstring"""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        """simple docstring"""
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        """simple docstring"""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated",):
        """simple docstring"""
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
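# Hedged usage sketch for the extraction API above (paths illustrative):
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#   if fmt:
#       Extractor.extract("archive.tar.gz", "./extracted", extractor_format=fmt)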
| 483
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    # NOTE: the concrete class name here is an assumption; the original
    # placeholder only shows a dummy object gated on ["torch", "torchsde"].
    _backends = ["torch", "torchsde"]
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["torch", "torchsde"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["torch", "torchsde"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["torch", "torchsde"])
| 483
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
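# Hedged usage sketch (values illustrative; defaults above fill in the rest):
#   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   config.feature_size == config.input_size * len(config.lags_sequence) + config._number_of_features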
| 190
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def __snake_case ( self :Any ) ->str:
lowercase : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__magic_name__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__magic_name__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def __snake_case ( self :Tuple ) ->Union[str, Any]:
lowercase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
lowercase : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=__magic_name__ )
lowercase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__magic_name__ )
lowercase : Union[str, Any] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : List[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : int = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
lowercase : List[Any] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __snake_case ( self :Optional[Any] ) ->int:
lowercase : Optional[int] = self.get_tokenizer()
lowercase : Tuple = """Encode this sequence."""
lowercase : Dict = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowercase : List[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
lowercase : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__magic_name__ , __magic_name__ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowercase : Union[str, Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
lowercase : List[str] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
# Testing spaces after special tokens
lowercase : Any = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ )} ) # mask token has a left space
lowercase : Any = tokenizer.convert_tokens_to_ids(__magic_name__ )
lowercase : Any = """Encode <mask> sequence"""
lowercase : str = """Encode <mask>sequence"""
lowercase : Optional[int] = tokenizer.encode(__magic_name__ )
lowercase : List[str] = encoded.index(__magic_name__ )
lowercase : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__magic_name__ , __magic_name__ )
lowercase : Tuple = tokenizer.encode(__magic_name__ )
lowercase : List[str] = encoded.index(__magic_name__ )
lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__magic_name__ , __magic_name__ )
def __snake_case ( self :Any ) ->int:
pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
def __snake_case ( self :List[str] ) ->Tuple:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __magic_name__ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __magic_name__ )
self.assertEqual(post_processor_state["""trim_offsets"""] , __magic_name__ )
def __snake_case ( self :Dict ) ->List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase : Optional[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase : Optional[Any] = f"""{text_of_1_token} {text_of_1_token}"""
lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : List[str] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ), len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Optional[int] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__magic_name__ ), len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : Optional[int] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Union[str, Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : int = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Optional[int] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ), 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
lowercase : str = self.rust_tokenizer_class.from_pretrained(
__magic_name__ , use_fast=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ )
lowercase : Any = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__magic_name__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__magic_name__ ), 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , )
| 264
| 0
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
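# Hedged wiring sketch (assumes a CLI entry point that calls register_subcommand):
#   parser = ArgumentParser("Transformers CLI tool")
#   commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
#   DownloadCommand.register_subcommand(commands_parser)
#   args = parser.parse_args(["download", "--cache-dir", "/tmp/models", "bert-base-uncased"])
#   args.func(args).run()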
| 332
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 332
| 1
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
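# Hedged usage sketch (sample values are illustrative):
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # [-1.162, -0.387, 0.387, 1.162]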
| 675
|
"""simple docstring"""
def solution():
    """simple docstring"""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
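# Equivalent sketch keeping only the last ten digits via modular arithmetic
# (assumes pow(i, i, 10**10); zfill preserves any leading zeros):
def solution_mod(n: int = 1000) -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, n + 1)) % mod).zfill(10)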
| 437
| 0
|
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
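# Hedged usage sketch: pass 0 for the single unknown quantity.
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   -> ('intrinsic_conc', 50.0)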
| 708
|
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'spiece.model'}
snake_case_ = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
snake_case_ = {
'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__ = None , **lowercase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE_ : Dict = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
SCREAMING_SNAKE_CASE_ : str = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
SCREAMING_SNAKE_CASE_ : List[Any] = "<|endoftext|>" if eos_token is None else eos_token
SCREAMING_SNAKE_CASE_ : Dict = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
SCREAMING_SNAKE_CASE_ : Tuple = unk_token if pad_token is None else pad_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = eos_token if bos_token is None else bos_token
else:
SCREAMING_SNAKE_CASE_ : int = "<pad>" if pad_token is None else pad_token
SCREAMING_SNAKE_CASE_ : Any = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=lowercase__ , remove_space=lowercase__ , keep_accents=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_ : Optional[int] = remove_space
SCREAMING_SNAKE_CASE_ : int = keep_accents
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
# Used for whitespace normalization in input texts
# fmt : off
SCREAMING_SNAKE_CASE_ : int = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
SCREAMING_SNAKE_CASE_ : List[str] = re.compile(
F"[{''.join(map(lowercase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        """simple docstring"""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        """simple docstring"""
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(index)
    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """simple docstring"""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids
    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """simple docstring"""
        return self.sp_model.decode(token_ids)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
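    # Hedged illustration of the conversation prompt built above (turn texts assumed):
    # for turns ["Hej!" (user), "Hej hej!" (bot)] the string handed to encode() is
    # "<|endoftext|><s>User: Hej!<s>Bot: Hej hej!<s>Bot:"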
| 68
| 0
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"
    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if a value exists in both `_text_config_dict` and `text_config` but differs.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if a value exists in both `_vision_config_dict` and `vision_config` but differs.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
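
# A minimal usage sketch (illustrative, not part of the original module; it assumes the
# three config classes above are importable under the names used here):
#
#     text_config = AltCLIPTextConfig()
#     vision_config = AltCLIPVisionConfig()
#     config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.to_dict()["model_type"] == "altclip"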
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """RoFormer fast tokenizer, backed by a HuggingFace `tokenizers` object."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer is not picklable; swap in a plain BERT one
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # serialize with the standard BERT pre-tokenizer; Jieba is restored on load
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
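
# A minimal usage sketch (illustrative; it assumes the "junnyu/roformer_chinese_base"
# checkpoint listed above ships a compatible tokenizer.json):
#
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     input_ids = tokenizer("今天天气非常好。")["input_ids"]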
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only anchor tags carry hyperlinks.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and neither empty nor a bare "#", keep it.
                if name == "href" and value != "#" and value != "":
                    # If not already collected, resolve it against the domain.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the registered domain of the URL."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location of the URL."""
    return parse.urlparse(url).netloc
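
# For illustration (hypothetical URL, not from the original file):
#   get_sub_domain_name("https://a.b.github.com/path") -> "a.b.github.com"
#   get_domain_name("https://a.b.github.com/path")     -> "github.com"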
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the given page and collect e-mail addresses found on its linked pages."""
    domain = get_domain_name(url)
    # Initialize the parser
    parser = Parser(url)
    try:
        # Open URL
        r = requests.get(url)
        # pass the raw HTML to the parser to get links
        parser.feed(r.text)
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open each discovered URL and scan its body
            try:
                read = requests.get(link)
                # Get the valid emails.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not already collected, keep it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp hashing."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
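
# Worked example of the rolling-hash update above (a sketch, not from the original file):
# sliding a window of width 2 over "abc" from "ab" to "bc" computes
#   ((hash("ab") - ord("a") * modulus_power) * alphabet_size + ord("c")) % modulus,
# i.e. the leading character is removed and the trailing one appended in O(1),
# so the whole scan stays O(len(text)) instead of O(len(text) * len(pattern)).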
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
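
# For illustration (hypothetical layer index 0), the mapping above yields pairs such as
#   ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight"),
# which `rename_key` below applies to the checkpoint's state dict one entry at a time.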
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
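
# Example invocation (a sketch; the script file name and output folder are illustrative):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small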
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(self, feature_size: int = 1, sampling_rate: int = 16_000, padding_value: float = 0.0,
                 do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64,
                 win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80,
                 fmax: float = 7600, mel_floor: float = 1e-10, reduction_factor: int = 2,
                 return_attention_mask: bool = True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin,
            max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride,
            fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False,
                 pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None,
                 **kwargs) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask,
                return_tensors, **kwargs,
            )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(self, speech, is_target: bool = False, padding=False, max_length=None, truncation=False,
                       pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None,
                       **kwargs) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )
        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
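
# A minimal usage sketch (illustrative; the 16 kHz waveform is synthetic):
#
#     extractor = SpeechT5FeatureExtractor()
#     waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
#     inputs = extractor(audio=waveform, sampling_rate=16_000)
#     # inputs["input_values"] holds the (optionally normalized) waveform batch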
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
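
# Example usage (a sketch; assumes this module is executed directly, as the
# `if __name__ == "__main__"` guard above suggests):
#   python config.py            # run the interactive `config` questionnaire
#   python config.py default    # write a default config file via the subcommand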
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
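
# With this lazy structure, importing the package does not eagerly import torch- or
# vision-dependent submodules; they are resolved on first attribute access. Sketch
# (illustrative; assumes the usual transformers top-level re-export):
#
#     from transformers import ConditionalDetrConfig
#     config = ConditionalDetrConfig()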
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000):
    """Randomly sample a chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
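
# For illustration: with sample_rate=16_000 and max_length=20.0 (the dataclass default
# below), any clip longer than 320_000 samples is cropped to a random 20-second window,
# while shorter clips are returned unchanged.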
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
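
# --- Editor's sketch (not part of the original script) ---
# A minimal, self-contained illustration of the label-mapping pattern used
# above: class names map to stringified integer ids and back, which is the
# shape `transformers` expects for `config.label2id` / `config.id2label`.
# The label names here are hypothetical.
def _label_mapping_demo():
    labels = ["dog_bark", "siren", "speech"]
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    assert id2label[label2id["siren"]] == "siren"
    return label2id, id2label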
| 564
|
"""Remove duplicated initializers from an ONNX model to reduce its file size."""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare the two protos with their names blanked out, then restore them.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Find duplicated initializers, keep one copy each, and save an optimized model."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
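
# --- Editor's sketch (assumed usage, not from the original file) ---
# Two initializers count as duplicates when their protos compare equal once
# names are blanked out; the saved bytes are `prod(dims) * dtype_width`,
# matching the accounting in `remove_dup_initializers` above.
def _duplicate_check_demo():
    import numpy
    from onnx import numpy_helper

    a = numpy_helper.from_array(numpy.zeros((2, 2), dtype=numpy.float32), name="a")
    b = numpy_helper.from_array(numpy.zeros((2, 2), dtype=numpy.float32), name="b")
    assert _is_equal_tensor_proto(a, b)  # equal despite different names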
| 564
| 1
|
import unittest

from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow


if is_flax_available():
    import jax

    from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
    from transformers.models.bert.modeling_flax_bert import FlaxBertModel
    from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel


@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
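
# --- Editor's note (illustrative, standalone) ---
# The jit tests above rely on `jax.jit` tracing a pure function of array
# arguments while the Flax model is closed over; retracing only happens when
# the input shapes or dtypes change. A toy equivalent:
def _jit_demo():
    import jax
    import jax.numpy as jnp

    @jax.jit
    def add_and_scale(x, y):
        return 2.0 * (x + y)

    return add_and_scale(jnp.ones(3), jnp.ones(3)).block_until_ready()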
| 73
|
"""Divide-and-conquer (binary) exponentiation."""


def actual_power(a: int, b: int) -> int:
    """Compute a**b for a non-negative integer exponent b using divide and conquer."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a**b, supporting negative exponents, e.g. power(-2, -3) == -0.125."""
    if b < 0:
        # Negate the exponent so the recursion always sees a non-negative b.
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
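
    # --- Editor's note: `actual_power` above makes two identical recursive
    # calls, so it performs O(b) multiplications. An iterative binary
    # exponentiation sketch that squares once per step is O(log b):
    def fast_power(base: int, exp: int) -> float:
        if exp < 0:
            return 1 / fast_power(base, -exp)
        result = 1
        while exp:
            if exp & 1:
                result *= base
            base *= base
            exp >>= 1
        return result

    assert fast_power(-2, -3) == power(-2, -3) == -0.125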
| 533
| 0
|
"""MinHash-based near-duplicate detection and filtering for code datasets."""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add `code_key` (an (index, repo_name, path) tuple) to the MinHashLSH index.

        The new key either joins the cluster of one close match or starts a new
        cluster, so the clusters depend on insertion order.
        """
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset and return them as lists of dicts."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster so that every member is similar to at least one kept "extreme"."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call `_find_cluster_extremes_shared` on all clusters, sharing the dataset via a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate `dataset` and return the filtered dataset plus the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
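
# --- Editor's sketch (hypothetical toy dataset; requires `datasets` and `datasketch`) ---
# End-to-end usage of the pipeline above: two identical files collapse into one
# cluster and only one "extreme" survives the final filter. Contents are padded
# so each snippet clears MIN_NUM_TOKENS.
def _dedup_demo():
    from datasets import Dataset

    ds = Dataset.from_dict(
        {
            "content": ["def add(a, b):\n    return a + b\n" * 4] * 2 + ["print('hello world')\n" * 8],
            "repo_name": ["r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    return ds_filter, duplicate_clusters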
| 171
|
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
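
# --- Editor's note (illustrative invocation; file names are hypothetical) ---
# `fire.Fire` exposes the function as a CLI: positional args map to
# `pred_path`/`tgt_path`, and flags become kwargs forwarded to `calculate_rouge`:
#   python rouge_cli.py preds.txt refs.txt --save_path rouge.json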
| 171
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
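
# --- Editor's sketch (standalone toy model, not BlenderbotSmall) ---
# How the axis dictionaries above are consumed: `torch.onnx.export` receives
# them as `dynamic_axes`, marking e.g. batch and sequence length as symbolic
# so the exported graph accepts variable-sized inputs.
def _dynamic_axes_demo(output_path="toy.onnx"):
    import torch

    class Toy(torch.nn.Module):
        def forward(self, input_ids):
            return input_ids.float().mean(dim=1)

    torch.onnx.export(
        Toy(),
        (torch.ones(2, 7, dtype=torch.long),),
        output_path,
        input_names=["input_ids"],
        output_names=["pooled"],
        dynamic_axes={"input_ids": {0: "batch", 1: "encoder_sequence"}},
    )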
| 492
|
"""Testing utilities: skip decorators, temp-dir test cases, and subprocess helpers."""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock

import torch

from ..state import AcceleratorState, PartialState
from ..utils import (
    gather,
    is_bnb_available,
    is_comet_ml_available,
    is_datasets_available,
    is_deepspeed_available,
    is_mps_available,
    is_safetensors_available,
    is_tensorboard_available,
    is_torch_version,
    is_tpu_available,
    is_transformers_available,
    is_wandb_available,
    is_xpu_available,
)


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; skipped unless RUN_SLOW=yes is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)


class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """Create a temporary directory shared by the whole test class."""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """Remove `cls.tmpdir` after the test suite has finished."""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)


def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """Run `command` with `subprocess.check_output`, optionally returning decoded stdout,
    and raise a `SubprocessCallException` with the captured output on failure."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
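
# --- Editor's sketch (assumed usage) ---
# The helpers above gate and drive subprocess-based tests; for example,
# RUN_SLOW=yes in the environment flips `_run_slow_tests` to True, and
# `run_command` is a thin checked wrapper around `subprocess.check_output`:
def _subprocess_helpers_demo():
    out = run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)
    assert out.strip() == "ok"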
| 394
| 0
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCAmelCase =0B101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCAmelCase =[int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark("""bits""" ,self.watermark )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Any:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_5_6:
return images
A = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
A = [self.encoder.encode(lowerCamelCase_ ,"""dwtDct""" ) for image in images]
A = torch.from_numpy(np.array(lowerCamelCase_ ) ).permute(0 ,3 ,1 ,2 )
A = torch.clamp(2 * (images / 2_5_5 - 0.5) ,min=-1.0 ,max=1.0 )
return images
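
# --- Editor's sketch (hypothetical tensors; requires the `invisible-watermark` package) ---
# Inputs are decoder outputs in [-1, 1], NCHW layout; anything narrower than
# 256 px passes through untouched, as guarded above.
def _watermark_demo():
    wm = StableDiffusionXLWatermarker()
    images = torch.zeros(1, 3, 256, 256)
    out = wm.apply_watermark(images)
    assert out.shape == images.shape and out.min() >= -1.0 and out.max() <= 1.0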
| 704
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
UpperCAmelCase =[
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _A ( _a : Tuple , _a : str , _a : int , _a : Dict , _a : Union[str, Any] ):
"""simple docstring"""
for attribute in key.split(""".""" ):
A = getattr(_a , _a )
if weight_type is not None:
A = getattr(_a , _a ).shape
else:
A = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _A ( _a : Union[str, Any] , _a : str ):
"""simple docstring"""
A = []
A = fairseq_model.state_dict()
A = hf_model.feature_extractor
A = hf_model.adapter
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == """group""" , )
A = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(_a , _a , _a , _a )
A = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A = True
if "*" in mapped_key:
A = name.split(_a )[0].split(""".""" )[-2]
A = mapped_key.replace("""*""" , _a )
if "weight_g" in name:
A = """weight_g"""
elif "weight_v" in name:
A = """weight_v"""
elif "bias" in name:
A = """bias"""
elif "weight" in name:
A = """weight"""
else:
A = None
set_recursively(_a , _a , _a , _a , _a )
continue
if not is_used:
unused_weights.append(_a )
logger.warning(f'Unused weights: {unused_weights}' )
def _A ( _a : int , _a : Optional[int] , _a : Any , _a : Union[str, Any] , _a : Tuple ):
"""simple docstring"""
A = full_name.split("""conv_layers.""" )[-1]
A = name.split(""".""" )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_a )
def _A ( _a : List[str] , _a : Any , _a : Union[str, Any] , _a : Optional[int] ):
"""simple docstring"""
A = full_name.split("""adaptor.""" )[-1]
A = name.split(""".""" )
if items[1].isdigit():
A = int(items[1] )
else:
A = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
A = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
A = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
A = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
A = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(_a , _a ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
A = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
A = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(_a )
def _A ( _a : List[Any] ):
"""simple docstring"""
A , A = emb.weight.shape
A = nn.Linear(_a , _a , bias=_a )
A = emb.weight.data
return lin_layer
@torch.no_grad()
def _A ( _a : List[str] , _a : Tuple , _a : Dict , _a : Optional[Any] , _a : str , _a : Dict , _a : Optional[int] , _a : Optional[Any] , _a : Tuple , _a : int , _a : Tuple , ):
"""simple docstring"""
A = WavaVecaConfig.from_pretrained(
_a , add_adapter=_a , adapter_stride=_a , adapter_kernel_size=_a , use_auth_token=_a , output_hidden_size=_a , )
A = MBartConfig.from_pretrained(_a )
# load model
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
A = model[0].eval()
# load feature extractor
A = WavaVecaFeatureExtractor.from_pretrained(_a , use_auth_token=_a )
# set weights for wav2vec2 encoder
A = WavaVecaModel(_a )
recursively_load_weights_wavaveca(model.encoder , _a )
# load decoder weights
A = MBartForCausalLM(_a )
A , A = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_a )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
A = SpeechEncoderDecoderModel(encoder=_a , decoder=_a )
A = False
A = MBartaaTokenizer(_a )
tokenizer.save_pretrained(_a )
A = hf_wavavec.config.to_dict()
A = tokenizer.pad_token_id
A = tokenizer.bos_token_id
A = tokenizer.eos_token_id
A = """mbart50"""
A = """wav2vec2"""
A = tokenizer.eos_token_id
A = 2_5_0_0_0_4
A = tokenizer.eos_token_id
A = SpeechEncoderDecoderConfig.from_dict(_a )
hf_wavavec.save_pretrained(_a )
feature_extractor.save_pretrained(_a )
if __name__ == "__main__":
UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
UpperCAmelCase =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
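
    # --- Editor's note (illustrative invocation; all paths are hypothetical) ---
    #   python convert_wav2vec2_mbart.py \
    #       --checkpoint_path ./xlsr_mbart.pt \
    #       --pytorch_dump_folder_path ./hf_model \
    #       --dict_path ./dict.mbart.txt \
    #       --config_yaml_path ./config.yaml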
| 255
| 0
|
"""Generate model cards for the ported facebook/wmt19 FSMT models."""

# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
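
# --- Editor's note: resulting layout (sketch) ---
# model_cards/facebook/wmt19-ru-en/README.md
# model_cards/facebook/wmt19-en-ru/README.md
# model_cards/facebook/wmt19-en-de/README.md
# model_cards/facebook/wmt19-de-en/README.md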
| 90
|
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # Seed the generator so the synthesized waveform is deterministic.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 475
| 0
|
"""Sandboxed execution helpers for evaluating generated code."""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py

import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile


def check_correctness(check_program, timeout, task_id, completion_id):
    """Run `check_program` in a subprocess and report whether it passed.

    `completion_id` is an optional ID so the results can be matched later even
    if execution finishes asynchronously.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive functions so the executed program cannot interfere
    with the test (fork bombs, killing processes, removing files, ...).

    WARNING: this is not a security sandbox; untrusted code should still be run
    inside a proper isolation layer.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
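
# --- Editor's sketch (hypothetical program string) ---
# `check_correctness` runs an untrusted program in a subprocess with the
# guards defined above and reports pass/fail. The main guard is required
# because the worker is spawned via `multiprocessing`.
if __name__ == "__main__":
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    print(check_correctness(demo_program, timeout=3.0, task_id="demo/0", completion_id=0))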
| 720
|
"""Build and measure an n-qubit GHZ (entangled) state on the Aer simulator."""
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
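
    # Editor's note (sketch): a GHZ state collapses jointly, so only the
    # all-zeros and all-ones bitstrings occur; with shots=1000 expect roughly
    # {'000': ~500, '111': ~500}.
    assert set(quantum_entanglement(3)) <= {"000", "111"}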
| 424
| 0
|
"""simple docstring"""
from math import sqrt
def __magic_name__ ( _lowerCamelCase : Any ):
__a : List[Any] = 0
for i in range(1 , int(sqrt(__UpperCAmelCase ) + 1 ) ):
if n % i == 0 and i != sqrt(__UpperCAmelCase ):
total += i + n // i
elif i == sqrt(__UpperCAmelCase ):
total += i
return total - n
def __magic_name__ ( _lowerCamelCase : Tuple = 1_0_0_0_0 ):
__a : Any = sum(
i
for i in range(1 , __UpperCAmelCase )
if sum_of_divisors(sum_of_divisors(__UpperCAmelCase ) ) == i and sum_of_divisors(__UpperCAmelCase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
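
    # Editor's note (worked example): 220 and 284 form the smallest amicable
    # pair, so both are counted by `solution(10000)`.
    assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220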
| 581
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['ChineseCLIPFeatureExtractor']
_A = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
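
# --- Editor's note (sketch) ---
# `_LazyModule` replaces this module in `sys.modules`, so the heavy torch/vision
# imports listed in `_import_structure` only execute when a symbol such as
# `ChineseCLIPModel` is first accessed, keeping `import transformers` cheap.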
| 159
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class a ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__(self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 255 , do_center_crop = True , crop_size = None , do_flip_channel_order = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(self , image , size , resample = PIL.Image.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop(self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale(self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order(self , image , data_format = None ) -> np.ndarray:
        return flip_channel_order(image , data_format=data_format )
    def preprocess(self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_center_crop = None , crop_size = None , do_flip_channel_order = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation(self , outputs , target_sizes = None ) -> List["torch.Tensor"]:
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
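# Usage sketch (illustrative, not part of the original class): the BGR flip
# above mirrors checkpoints trained on OpenCV-style inputs. A minimal check
# with a channels-first numpy array, assuming `flip_channel_order` infers the
# channel dimension:
def _demo_flip_channel_order() -> None:
    rgb = np.zeros((3, 2, 2) )
    rgb[0] = 1.0  # mark the red channel
    bgr = flip_channel_order(rgb )
    assert bgr[2].all() and not bgr[0].any()  # red is now the last channel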
| 579
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError("the value of input must not be negative" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    '''simple docstring'''
    def do_benchmark(number: int ) -> None:
        setup = "import __main__ as z"
        print(F"""Benchmark when {number = }:""" )
        print(F"""{get_set_bits_count_using_modulo_operator(number ) = }""" )
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=setup )
        print(F"""timeit() runs in {timing} seconds""" )
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" )
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=setup , )
        print(F"""timeit() runs in {timing} seconds""" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
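# Worked example (illustrative): Brian Kernighan's trick clears the lowest set
# bit each iteration, so 0b1101 -> 0b1100 -> 0b1000 -> 0b0000 in three steps.
def _demo_popcount() -> None:
    assert get_set_bits_count_using_brian_kernighans_algorithm(0b1101 ) == 3
    assert get_set_bits_count_using_modulo_operator(0b1101 ) == 3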
| 579
| 1
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key: str , default: bool = False ) -> bool:
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case ):
    return unittest.skip('''Test was skipped''' )(test_case )
def slow(test_case ):
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(test_case )
def require_cpu(test_case ):
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(test_case )
def require_cuda(test_case ):
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(test_case )
def require_xpu(test_case ):
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(test_case )
def require_mps(test_case ):
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(test_case )
def require_huggingface_suite(test_case ):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(test_case )
def require_bnb(test_case ):
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(test_case )
def require_tpu(test_case ):
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(test_case )
def require_single_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(test_case )
def require_single_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(test_case )
def require_multi_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(test_case )
def require_multi_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(test_case )
def require_safetensors(test_case ):
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(test_case )
def require_deepspeed(test_case ):
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(test_case )
def require_fsdp(test_case ):
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(test_case )
def require_torch_min_version(test_case=None , version=None ):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f"""test requires torch version >= {version}""" )(test_case )
def require_tensorboard(test_case ):
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(test_case )
def require_wandb(test_case ):
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(test_case )
def require_comet_ml(test_case ):
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(test_case )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case ):
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(test_case )
class TempDirTestCase(unittest.TestCase ):
    clear_on_setup = True
@classmethod
    def setUpClass(cls ):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
    def tearDownClass(cls ):
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
    def setUp(self ):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
                shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    def add_mocks(self , mocks: Union[mock.Mock, List[mock.Mock]] ):
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor ) -> bool:
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__(self , returncode , stdout , stderr ):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class SubprocessCallException(Exception ):
    pass
def run_command(command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"""Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 592
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
snake_case_ = logging.getLogger(__name__)
snake_case_ = 'Hello world! cécé herlolip'
snake_case_ = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path , pytorch_dump_folder_path ):
    config = BertAbsConfig(
        temp_dir='''.''' , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    checkpoints = torch.load(path , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device('''cpu''' ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device('''cpu''' ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''' )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1e-3 )
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''' )
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''' )
    torch.save(
        new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
snake_case_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
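# Usage sketch (illustrative): the conversion can also be driven from Python;
# the checkpoint path below is a hypothetical placeholder.
def _demo_convert(checkpoint_path: str = "path/to/bertabs_cnndm_final.pt" ) -> None:
    convert_bertabs_checkpoints(checkpoint_path , "./bertabs-converted" )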
| 592
| 1
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
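# Illustrative sketch (not part of this module): any scheduler above can be
# swapped into a loaded diffusers pipeline via `from_config`, e.g.:
def _demo_swap_scheduler(pipe ):
    # `pipe` is assumed to be an already-loaded `DiffusionPipeline`.
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
    return pipe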
| 700
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
"""simple docstring"""
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=64 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ):
'''simple docstring'''
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = embedding_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
UpperCamelCase__ = model(snake_case , token_type_ids=snake_case )
UpperCamelCase__ = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForNextSentencePrediction(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , next_sentence_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MegatronBertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = MegatronBertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = MegatronBertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
), (
UpperCamelCase__
),
) = config_and_inputs
UpperCamelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : int = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCamelCase : int = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
# test_resize_embeddings = False
_UpperCamelCase : List[Any] = False
def snake_case__ ( self , snake_case , snake_case , snake_case=False ):
'''simple docstring'''
UpperCamelCase__ = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
UpperCamelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
UpperCamelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = MegatronBertModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*snake_case )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip("Model is not available." )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
UpperCamelCase__ = os.path.join(os.environ["MYDIR"] , snake_case )
UpperCamelCase__ = MegatronBertModel.from_pretrained(snake_case )
model.to(snake_case )
model.half()
UpperCamelCase__ = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
UpperCamelCase__ = model(snake_case )[0]
UpperCamelCase__ = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , snake_case )
UpperCamelCase__ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
UpperCamelCase__ = output[0, ii, jj]
UpperCamelCase__ = expected[3 * ii + jj]
UpperCamelCase__ = "ii={} jj={} a={} b={}".format(snake_case , snake_case , snake_case , snake_case )
self.assertTrue(math.isclose(snake_case , snake_case , rel_tol=snake_case , abs_tol=snake_case ) , msg=snake_case )
| 185
| 0
|
def binary_or(a: int , b: int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
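# Worked example (illustrative): 25 = 0b11001 and 32 = 0b100000, so the
# bitwise OR is 0b111001 (decimal 57).
def _demo_binary_or() -> None:
    assert binary_or(25 , 32 ) == "0b111001"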
| 290
|
import argparse
import struct
import unittest
class SHAaaa:
    def __init__(self , data: bytes ) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6_A_0_9_E_6_6_7,
0xB_B_6_7_A_E_8_5,
0x3_C_6_E_F_3_7_2,
0xA_5_4_F_F_5_3_A,
0x5_1_0_E_5_2_7_F,
0x9_B_0_5_6_8_8_C,
0x1_F_8_3_D_9_A_B,
0x5_B_E_0_C_D_1_9,
]
# Initialize round constants
        self.round_constants = [
0x4_2_8_A_2_F_9_8,
0x7_1_3_7_4_4_9_1,
0xB_5_C_0_F_B_C_F,
0xE_9_B_5_D_B_A_5,
0x3_9_5_6_C_2_5_B,
0x5_9_F_1_1_1_F_1,
0x9_2_3_F_8_2_A_4,
0xA_B_1_C_5_E_D_5,
0xD_8_0_7_A_A_9_8,
0x1_2_8_3_5_B_0_1,
0x2_4_3_1_8_5_B_E,
0x5_5_0_C_7_D_C_3,
0x7_2_B_E_5_D_7_4,
0x8_0_D_E_B_1_F_E,
0x9_B_D_C_0_6_A_7,
0xC_1_9_B_F_1_7_4,
0xE_4_9_B_6_9_C_1,
0xE_F_B_E_4_7_8_6,
0x0_F_C_1_9_D_C_6,
0x2_4_0_C_A_1_C_C,
0x2_D_E_9_2_C_6_F,
0x4_A_7_4_8_4_A_A,
0x5_C_B_0_A_9_D_C,
0x7_6_F_9_8_8_D_A,
0x9_8_3_E_5_1_5_2,
0xA_8_3_1_C_6_6_D,
0xB_0_0_3_2_7_C_8,
0xB_F_5_9_7_F_C_7,
0xC_6_E_0_0_B_F_3,
0xD_5_A_7_9_1_4_7,
0x0_6_C_A_6_3_5_1,
0x1_4_2_9_2_9_6_7,
0x2_7_B_7_0_A_8_5,
0x2_E_1_B_2_1_3_8,
0x4_D_2_C_6_D_F_C,
0x5_3_3_8_0_D_1_3,
0x6_5_0_A_7_3_5_4,
0x7_6_6_A_0_A_B_B,
0x8_1_C_2_C_9_2_E,
0x9_2_7_2_2_C_8_5,
0xA_2_B_F_E_8_A_1,
0xA_8_1_A_6_6_4_B,
0xC_2_4_B_8_B_7_0,
0xC_7_6_C_5_1_A_3,
0xD_1_9_2_E_8_1_9,
0xD_6_9_9_0_6_2_4,
0xF_4_0_E_3_5_8_5,
0x1_0_6_A_A_0_7_0,
0x1_9_A_4_C_1_1_6,
0x1_E_3_7_6_C_0_8,
0x2_7_4_8_7_7_4_C,
0x3_4_B_0_B_C_B_5,
0x3_9_1_C_0_C_B_3,
0x4_E_D_8_A_A_4_A,
0x5_B_9_C_C_A_4_F,
0x6_8_2_E_6_F_F_3,
0x7_4_8_F_8_2_E_E,
0x7_8_A_5_6_3_6_F,
0x8_4_C_8_7_8_1_4,
0x8_C_C_7_0_2_0_8,
0x9_0_B_E_F_F_F_A,
0xA_4_5_0_6_C_E_B,
0xB_E_F_9_A_3_F_7,
0xC_6_7_1_7_8_F_2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes ) -> bytes:
        padding = B"\x80" + (B"\x00" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash(self ) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0_0_0_0_0_0_0_0
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xF_F_F_F_F_F_F_F) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0_0_0_0_0_0_0_0
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0_0_0_0_0_0_0_0
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0_0_0_0_0_0_0_0),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = "".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror(self , value: int , rotations: int ) -> int:
        return 0xF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest(unittest.TestCase ):
    def test_match_hashes(self ) -> None:
        import hashlib
        test_string = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(test_string ).hash , hashlib.sha256(test_string ).hexdigest() )
def main():
    """simple docstring"""
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument(
        "-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , "utf-8" )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
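# Usage sketch (illustrative): hashing a short message and cross-checking
# against the standard library implementation.
def _demo_shaaaa() -> None:
    import hashlib
    message = b"abc"
    assert SHAaaa(message ).hash == hashlib.sha256(message ).hexdigest()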
| 290
| 1
|
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]] ) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
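# Worked example (illustrative): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the
# cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1 with total cost 7.
def _demo_min_path_sum() -> None:
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]] ) == 7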
| 366
|
"""simple docstring"""
def solution(n: int = 600851475143 ) -> int:
    '''simple docstring'''
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 366
| 1
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file , repo_path="shi-labs/oneformer_demo" ):
    '''simple docstring'''
    with open(hf_hub_download(repo_path , class_info_file , repo_type="dataset" ) , "r" ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class _snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict=7 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : int=30 , UpperCAmelCase : List[Any]=400 , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Optional[Any]=255 , UpperCAmelCase : Optional[int]="shi-labs/oneformer_demo" , UpperCAmelCase : Optional[Any]="ade20k_panoptic.json" , UpperCAmelCase : List[Any]=10 , ):
__lowerCamelCase : Optional[Any] = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : Dict = num_channels
__lowerCamelCase : List[str] = min_resolution
__lowerCamelCase : Union[str, Any] = max_resolution
__lowerCamelCase : Union[str, Any] = do_resize
__lowerCamelCase : Optional[int] = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
__lowerCamelCase : Union[str, Any] = do_normalize
__lowerCamelCase : List[Any] = image_mean
__lowerCamelCase : Optional[Any] = image_std
__lowerCamelCase : int = class_info_file
__lowerCamelCase : List[Any] = prepare_metadata(lowerCAmelCase__ , lowerCAmelCase__ )
__lowerCamelCase : Union[str, Any] = num_text
__lowerCamelCase : Union[str, Any] = repo_path
# for the post_process_functions
__lowerCamelCase : int = 2
__lowerCamelCase : Optional[Any] = 10
__lowerCamelCase : Optional[int] = 10
__lowerCamelCase : str = 3
__lowerCamelCase : Tuple = 4
__lowerCamelCase : Any = num_labels
__lowerCamelCase : Dict = do_reduce_labels
__lowerCamelCase : int = ignore_index
def lowerCamelCase__ ( self : int ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]=False ):
if not batched:
__lowerCamelCase : Optional[Any] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__lowerCamelCase : Optional[int] = image.size
else:
__lowerCamelCase : Optional[int] = image.shape[1], image.shape[2]
if w < h:
__lowerCamelCase : Optional[int] = int(self.size["shortest_edge"] * h / w )
__lowerCamelCase : Optional[int] = self.size["shortest_edge"]
elif w > h:
__lowerCamelCase : Union[str, Any] = self.size["shortest_edge"]
__lowerCamelCase : Dict = int(self.size["shortest_edge"] * w / h )
else:
__lowerCamelCase : Tuple = self.size["shortest_edge"]
__lowerCamelCase : Union[str, Any] = self.size["shortest_edge"]
else:
__lowerCamelCase : int = []
for image in image_inputs:
__lowerCamelCase : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCamelCase : List[str] = max(lowerCAmelCase__ , key=lambda UpperCAmelCase : item[0] )[0]
__lowerCamelCase : Optional[int] = max(lowerCAmelCase__ , key=lambda UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
def lowerCamelCase__ ( self : str ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
snake_case__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
snake_case__ = image_processing_class
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def lowerCamelCase__ ( self : List[Any] ):
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "ignore_index" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "class_info_file" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "num_text" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "repo_path" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "metadata" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_reduce_labels" ) )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
def lowerCamelCase__ ( self : Tuple ):
# Initialize image_processor
__lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__lowerCamelCase : Optional[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__lowerCamelCase : Tuple = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : Any = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
__lowerCamelCase : Any = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : List[str] ):
# Initialize image_processor
__lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__lowerCamelCase : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__lowerCamelCase : Dict = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : Tuple = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
__lowerCamelCase : Optional[int] = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : List[Any] ):
# Initialize image_processor
__lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__lowerCamelCase : int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__lowerCamelCase : str = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : Optional[int] = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
__lowerCamelCase : List[Any] = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Optional[Any]="np" ):
__lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCamelCase : Any = self.image_processing_tester.num_labels
__lowerCamelCase : Tuple = None
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
if with_segmentation_maps:
__lowerCamelCase : List[str] = num_labels
if is_instance_map:
__lowerCamelCase : Tuple = list(range(lowerCAmelCase__ ) ) * 2
__lowerCamelCase : Dict = dict(enumerate(lowerCAmelCase__ ) )
__lowerCamelCase : str = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCamelCase : Union[str, Any] = [Image.fromarray(lowerCAmelCase__ ) for annotation in annotations]
__lowerCamelCase : Union[str, Any] = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , lowerCAmelCase__ , return_tensors="pt" , instance_id_to_semantic_id=lowerCAmelCase__ , pad_and_return_pixel_mask=lowerCAmelCase__ , )
return inputs
def lowerCamelCase__ ( self : Dict ):
pass
    def test_call_with_segmentation_maps( self ):
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type="pil" )
        common(is_instance_map=True , segmentation_type="pil" )
    def test_binary_mask_to_rle( self ):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )
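    # For reference, a minimal sketch of the run-length encoding checked above,
    # assuming the usual "pad, diff, pair starts with lengths" formulation (the
    # library's own binary_mask_to_rle may differ in details):
    #
    #   pixels = np.concatenate([[0], mask.flatten(), [0]])
    #   runs = np.where(pixels[1:] != pixels[:-1])[0] + 1   # 1-based change points
    #   runs[1::2] -= runs[::2]                             # turn ends into run lengths
    #
    # which yields [21, 45, 251, 10] for the mask built in the test above.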
    def test_post_process_semantic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
    def test_post_process_instance_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation( self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 646
|
import re


def split_input(str_ ):
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]


def to_simple_case(str_ ):
    string_split = split_input(str_ )
    return "".join(
        ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )


def to_complex_case(text , upper , separator ):
    try:
        string_split = split_input(text )
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text ):
    return to_simple_case(text )


def to_camel_case(text ):
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text , upper ):
    return to_complex_case(text , upper , "_" )


def to_kebab_case(text , upper ):
    return to_complex_case(text , upper , "-" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
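# Illustrative behavior of the helpers above, derived by tracing the code (these
# are worked examples, not verified doctests):
#   to_pascal_case("one two 31235three4four")        -> "OneTwo31235three4four"
#   to_camel_case("one two 31235three4four")         -> "oneTwo31235three4four"
#   to_snake_case("one two 31235three4four", True)   -> "ONE_TWO_31235THREE4FOUR"
#   to_kebab_case("one two 31235three4four", False)  -> "one-two-31235three4four"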
| 671
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
        else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
    def test_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4 ) )
| 709
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig ):
    model_type = '''convbert'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig ):
@property
def a_ ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
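# A minimal usage sketch for the config class above (illustrative values only;
# this instantiates a randomly initialized encoder, not a pretrained one):
#
#   from transformers import ConvBertConfig, ConvBertModel
#
#   config = ConvBertConfig(hidden_size=768, conv_kernel_size=9)
#   model = ConvBertModel(config)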
| 276
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ):
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


def postorder(root: Node | None ):
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


def inorder(root: Node | None ):
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


def height(root: Node | None ):
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ):
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None , level: int ):
    output = []
    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output


def get_nodes_from_right_to_left(root: Node | None , level: int ):
    output = []
    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root: Node | None ):
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f'In-order Traversal: {inorder(tree )}' )
    print(f'Pre-order Traversal: {preorder(tree )}' )
    print(f'Post-order Traversal: {postorder(tree )}' , """\n""" )
    print(f'Height of Tree: {height(tree )}' , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(tree ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(tree ) + 1 ):
        print(f'Level {level}:' , get_nodes_from_left_to_right(tree , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
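# For the sample tree built by make_tree() (1 at the root, 2 and 3 as its
# children, 4 and 5 under 2), the traversals above yield, by inspection:
#   preorder  -> [1, 2, 4, 5, 3]
#   inorder   -> [4, 2, 5, 1, 3]
#   postorder -> [4, 5, 2, 3, 1]
#   zigzag    -> [[1], [3, 2], [4, 5]]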
| 5
|
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
lowercase : Dict = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer ):
"""simple docstring"""
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2_0_4_8 , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ) -> int:
        return self._unicode_vocab_size
    def _tokenize( self , text : str ) -> List[str]:
        return list(text )
    def _convert_token_to_id( self , token : str ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"invalid token: '{token}'" )
    def _convert_id_to_token( self , index : int ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"invalid id: {index}" )
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        return ()
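    # Illustrative round trip for this character-level tokenizer (a sketch; the
    # checkpoint name comes from the size map defined at the top of this file):
    #   tokenizer = CanineTokenizer.from_pretrained("nielsr/canine-s")
    #   ids = tokenizer("hello")["input_ids"]
    #   # -> [CLS (0xE000), ord("h"), ord("e"), ord("l"), ord("l"), ord("o"), SEP (0xE001)]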
| 649
| 0
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline(ChunkPipeline ):
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , """vision""" )
        requires_backends(self , """torch""" )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["""points_per_batch"""] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["""points_per_crop"""] = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["""crops_n_layers"""] = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["""crop_overlap_ratio"""] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["""crop_n_points_downscale_factor"""] = kwargs["""crop_n_points_downscale_factor"""]
        # forward/postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["""pred_iou_thresh"""] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            forward_params["""stability_score_offset"""] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            forward_params["""mask_threshold"""] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            forward_params["""stability_score_thresh"""] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["""crops_nms_thresh"""] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["""output_rle_mask"""] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["""output_bboxes_mask"""] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers : int = 0 , crop_overlap_ratio : float = 512 / 1500 , points_per_crop : Optional[int] = 32 , crop_n_points_downscale_factor : Optional[int] = 1 , ):
        image = load_image(image )
        target_size = self.image_processor.size["""longest_edge"""]
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors="""pt""" )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
                    model_inputs["""image_embeddings"""] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""" )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        input_boxes = model_inputs.pop("""input_boxes""" )
        is_last = model_inputs.pop("""is_last""" )
        original_sizes = model_inputs.pop("""original_sizes""" ).tolist()
        reshaped_input_sizes = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["""pred_masks"""]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs["""iou_scores"""]
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores""" ) )
            all_masks.extend(model_output.pop("""masks""" ) )
            all_boxes.append(model_output.pop("""boxes""" ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional["""rle_mask"""] = rle_mask
        if output_bboxes_mask:
            optional["""bounding_boxes"""] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
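    # Illustrative end-to-end usage of this chunked pipeline (a sketch; the SAM
    # checkpoint name and image URL are assumptions, not taken from this file):
    #   from transformers import pipeline
    #   generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
    #   outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
    #   masks, scores = outputs["masks"], outputs["scores"]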
| 711
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ):
        super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize("""你好[SEP]你是谁""" )
        self.assertListEqual(tokens , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
    def test_chinese( self ):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
    def test_is_whitespace( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control( self ):
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text( self ):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
    def test_offsets_with_special_characters( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""" ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars( self ):
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        text_with_chinese_char = """""".join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode("""你好""" , add_special_tokens=False )
        text_a = tokenizer.encode("""你是谁""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                string_sequence = """你好,你是谁"""
                tokens = tokenizer.tokenize(string_sequence )
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True )
                self.assertEqual(input_dict , prepared_input_dict )
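    # The point of the three parallel id channels exercised above, in brief:
    #   input_ids = tokenizer.convert_tokens_to_ids(tokens)                 # semantic vocabulary ids
    #   shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)           # glyph-shape ids
    #   pron_ids  = tokenizer.convert_tokens_to_pronunciation_ids(tokens)   # pronunciation (pinyin) ids
    # RoCBERT consumes all three so that visually or phonetically confusable
    # characters remain distinguishable.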
| 59
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_encoder_decoder"""] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_encoder_decoder"""] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_encoder_decoder"""] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
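# Illustrative effect of the lazy module set up above (a sketch): the heavy
# submodules are imported only on first attribute access, e.g.
#   from transformers.models.encoder_decoder import EncoderDecoderConfig  # cheap
#   # modeling_encoder_decoder is not imported until EncoderDecoderModel is touched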
| 74
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts( self , tokenizer ):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    def test_is_whitespace( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text( self ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , '''do_lower_case''' ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def test_change_tokenize_chinese_chars(self):
        list_of_chinese_chars = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_chinese_chars)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_chinese_chars)
                self.assertListEqual(tokens_without_spe_char_r, list_of_chinese_chars)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_chinese_chars)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
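
    # For context (an illustrative note, not one of the original tests): the
    # tokenize_chinese_chars flag controls whether the basic tokenizer splits
    # each CJK character into its own word before WordPiece runs. Assuming the
    # "google/mobilebert-uncased" vocabulary used above:
    #
    #     tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
    #     tokenizer.tokenize("的人有")  # -> ["的", "人", "有"] with the flag on
    #
    # With the flag off, the three characters reach WordPiece as a single word
    # and come back as ["的", "##人", "##有"], which is exactly what the
    # expected_tokens list in the test above encodes.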
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # Default to the "train" split unless several paths are given as a dict of splits.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
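

# A minimal sketch (not part of the library) of how a concrete reader could
# subclass AbstractDatasetReader. The class name and the single-JSON-file
# handling are assumptions for illustration; real readers in the library
# delegate to dataset builders rather than constructing the Dataset directly.
import json


class InMemoryJsonReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        # Load one JSON file of records and turn the list of rows into columns.
        with open(self.path_or_paths, encoding="utf-8") as f:
            records = json.load(f)
        columns = {key: [row[key] for row in records] for key in records[0]}
        return Dataset.from_dict(columns, features=self.features)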
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    """Map any real value into the (0, 1) interval."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy between predictions h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """Log-likelihood of the labels under the current weights."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    """Fit the weights by batch gradient descent with learning rate alpha."""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
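
    # Optional sanity check (a sketch, not part of the original script): compare
    # the learned boundary against scikit-learn's LogisticRegression on the same
    # two features; C=1e9 approximates an unregularized fit.
    #
    #     from sklearn.linear_model import LogisticRegression
    #     clf = LogisticRegression(C=1e9).fit(x, y)
    #     print("sklearn coef:", clf.coef_, "intercept:", clf.intercept_)
    #
    # Note that logistic_reg above omits a bias/intercept term, so the
    # coefficients will differ; appending a column of ones to x would make the
    # two models directly comparable.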