from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with a Diffusion Transformer (DiT)."""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
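
# A minimal usage sketch, assuming the public "facebook/DiT-XL-2-256" checkpoint
# and a CUDA device are available (adjust the model id and device as needed):
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#     generator = torch.manual_seed(33)
#     images = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images
#     images[0].save("dit_sample.png")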
"""simple docstring"""
def snake_case__ ( _snake_case : int ):
"""simple docstring"""
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCamelCase__ = len(bin(_snake_case )[3:] )
UpperCamelCase__ = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:]
UpperCamelCase__ = (
(
"1"
+ "0" * (binary_number_length - len(_snake_case ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
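
# A few quick sanity checks of the helper above, worked out by hand:
# -5 is 1011 in 4-bit two's complement, -1 is 1, and 0 maps to "0b0".
if __name__ == "__main__":
    assert twos_complement(0) == "0b0"
    assert twos_complement(-1) == "0b1"
    assert twos_complement(-5) == "0b1011"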
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable
    unCLIP, to normalize image embeddings before noise is applied and
    un-normalize the noised embeddings afterwards.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
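
# A minimal sketch of the scale/unscale round trip (shapes assumed; the default
# embedding_dim is 768). With the freshly initialised zero mean and unit std,
# the two calls should invert each other up to float noise:
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     embeds = torch.randn(4, 768)
#     recovered = normalizer.unscale(normalizer.scale(embeds))
#     assert torch.allclose(recovered, embeds, atol=1e-5)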
a ="""0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
__snake_case : Union[str, Any] = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case=None ) -> List[Any]:
# Initialise PyTorch model
__lowerCAmelCase : Union[str, Any] = XLNetConfig.from_json_file(_UpperCAmelCase )
__lowerCAmelCase : str = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
__lowerCAmelCase : List[str] = finetuning_task
__lowerCAmelCase : Optional[int] = GLUE_TASKS_NUM_LABELS[finetuning_task]
__lowerCAmelCase : List[Any] = XLNetForSequenceClassification(_UpperCAmelCase )
elif "squad" in finetuning_task:
__lowerCAmelCase : Tuple = finetuning_task
__lowerCAmelCase : Dict = XLNetForQuestionAnswering(_UpperCAmelCase )
else:
__lowerCAmelCase : Dict = XLNetLMHeadModel(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# Save pytorch-model
__lowerCAmelCase : int = os.path.join(_UpperCAmelCase ,_UpperCAmelCase )
__lowerCAmelCase : List[str] = os.path.join(_UpperCAmelCase ,_UpperCAmelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCAmelCase )}""" )
torch.save(model.state_dict() ,_UpperCAmelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCAmelCase )}""" )
with open(_UpperCAmelCase ,"w" ,encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
__snake_case : Dict = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
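
# A hypothetical direct invocation with placeholder paths (the argparse entry
# point above is the intended interface):
#
#     convert_xlnet_checkpoint_to_pytorch(
#         tf_checkpoint_path="xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt",
#         bert_config_file="xlnet_cased_L-12_H-768_A-12/xlnet_config.json",
#         pytorch_dump_folder_path="xlnet_pytorch",
#         finetuning_task="sts-b",
#     )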
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file's bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Extend the lexicon with the two children of `curr_string`, widening ids when needed."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # the id width grew by one bit; pad every existing id with a leading zero
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress a bit string using Lempel-Ziv coding and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the (self-delimiting) binary length of the source file."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a stop marker) and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
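
# A quick in-memory sketch of the bit-level compressor (no file I/O); the
# output length depends on how the lexicon grows as prefixes repeat:
#
#     bits = "0" * 32 + "1" * 32
#     packed = compress_data(bits)
#     print(f"{len(bits)} input bits -> {len(packed)} output bits")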
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =[10, 20, 30, 40, 50, 60]
__UpperCamelCase =[2, 4, 6, 8, 10, 12]
__UpperCamelCase =100
self.assertEqual(kp.calc_profit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 210 )
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase__ , '''max_weight must greater than zero.''' )
def UpperCAmelCase_ ( self : str ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase__ , '''Weight can not be negative.''' )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase__ , '''Profit can not be negative.''' )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
'''simple docstring'''
self.assertRaisesRegex(UpperCamelCase__ , '''max_weight must greater than zero.''' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertRaisesRegex(
UpperCamelCase__ , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _lowercase :
"""simple docstring"""
lowercase__ = LEDConfig
lowercase__ = {}
lowercase__ = '''gelu'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : int=37 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[int]=20 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Tuple=4 , ) -> str:
'''simple docstring'''
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =eos_token_id
__UpperCamelCase =pad_token_id
__UpperCamelCase =bos_token_id
__UpperCamelCase =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
__UpperCamelCase =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
__UpperCamelCase =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCamelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase =tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
__UpperCamelCase =prepare_led_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tf.concat(
[tf.zeros_like(UpperCamelCase__ )[:, :-1], tf.ones_like(UpperCamelCase__ )[:, -1:]] , axis=-1 , )
__UpperCamelCase =global_attention_mask
return config, inputs_dict
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ) -> Any:
'''simple docstring'''
__UpperCamelCase =TFLEDModel(config=UpperCamelCase__ ).get_decoder()
__UpperCamelCase =inputs_dict['''input_ids''']
__UpperCamelCase =input_ids[:1, :]
__UpperCamelCase =inputs_dict['''attention_mask'''][:1, :]
__UpperCamelCase =1
# first forward pass
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
__UpperCamelCase , __UpperCamelCase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__UpperCamelCase =tf.concat([input_ids, next_tokens] , axis=-1 )
__UpperCamelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
__UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__UpperCamelCase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx]
__UpperCamelCase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 )
def lowerCAmelCase (__UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Any=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , ):
"""simple docstring"""
if attention_mask is None:
__UpperCamelCase =tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCamelCase =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCamelCase =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowercase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowercase__ = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =tf.zeros_like(inputs_dict['''attention_mask'''] )
__UpperCamelCase =2
__UpperCamelCase =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
__UpperCamelCase =True
__UpperCamelCase =self.model_tester.seq_length
__UpperCamelCase =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCamelCase__ : Tuple ):
__UpperCamelCase =outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCamelCase__ : Dict ):
__UpperCamelCase =[t.numpy() for t in outputs.encoder_attentions]
__UpperCamelCase =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCamelCase =len(UpperCamelCase__ )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
if self.is_encoder_decoder:
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_decoder_attentions_output(UpperCamelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
# Check attention is always last and order is fine
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =model_class(UpperCamelCase__ )
__UpperCamelCase =model(self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase__ )
check_encoder_attentions_output(UpperCamelCase__ )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase (__UpperCamelCase : str ):
"""simple docstring"""
return tf.constant(__UpperCamelCase , dtype=tf.intaa )
__lowercase = 1e-4
@slow
@require_tf
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
__UpperCamelCase =_long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =_long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =prepare_led_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =model(**UpperCamelCase__ )[0]
__UpperCamelCase =(1, 1024, 768)
self.assertEqual(output.shape , UpperCamelCase__ )
# change to expected output here
__UpperCamelCase =tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-3 )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
__UpperCamelCase =_long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =_long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
__UpperCamelCase =prepare_led_inputs_dict(model.config , UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =model(**UpperCamelCase__ )[0]
__UpperCamelCase =(1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCamelCase__ )
# change to expected output here
__UpperCamelCase =tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-3 , rtol=1E-3 )
import sys


def matrix_chain_order(array):
    """Dynamic-programming matrix chain multiplication: O(n^3) time, O(n^2) space."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization, with A1..An as the matrices."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
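
# Sanity check against the classic CLRS instance used in main(): for dims
# [30, 35, 15, 5, 10, 20, 25] the optimal scalar-multiplication count is 15125.
if __name__ == "__main__":
    matrix, _sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert matrix[1][6] == 15125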
def kth_permutation(k: int, n: int) -> list:
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
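
# Typical invocations (from the repository root), per the error message above:
#
#     python utils/check_copies.py                      # report drifted "# Copied from" blocks
#     python utils/check_copies.py --fix_and_overwrite  # rewrite them from the originals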
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the dataset and save it to save_dir/rank_{local_rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase :
def __init__( self :Optional[int] , _lowercase :Optional[int] , _lowercase :Tuple=13 , _lowercase :Optional[Any]=32 , _lowercase :Optional[Any]=2 , _lowercase :Tuple=3 , _lowercase :Optional[Any]=16 , _lowercase :Union[str, Any]=[1, 2, 1] , _lowercase :Any=[2, 2, 4] , _lowercase :List[Any]=2 , _lowercase :Any=2.0 , _lowercase :List[str]=True , _lowercase :Dict=0.0 , _lowercase :List[str]=0.0 , _lowercase :Optional[Any]=0.1 , _lowercase :Dict="gelu" , _lowercase :Dict=False , _lowercase :Optional[int]=True , _lowercase :str=0.02 , _lowercase :Any=1e-5 , _lowercase :str=True , _lowercase :Union[str, Any]=None , _lowercase :int=True , _lowercase :Tuple=10 , _lowercase :Union[str, Any]=8 , _lowercase :str=["stage1", "stage2", "stage3"] , _lowercase :List[Any]=[1, 2, 3] , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = patch_norm
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = is_training
lowercase__ = scope
lowercase__ = use_labels
lowercase__ = type_sequence_label_size
lowercase__ = encoder_stride
lowercase__ = out_features
lowercase__ = out_indices
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase ( self :Dict , _lowercase :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :str ):
'''simple docstring'''
lowercase__ = MaskFormerSwinModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase )
lowercase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
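        # The channel list mirrors the stage-doubling rule: embed_dim=16 at stage1, then 32 and 64.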
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_from_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_gradient_checkpointing_backward_compatibility(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaN != NaN, so this mask selects exactly the NaN entries
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Return a visual representation of the node and all its following nodes, e.g. 14->52->14."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given sequence and return its head node."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Print the list elements in reverse order by recursing to the tail first."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
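
# print_reverse recurses once per node, so Python's default recursion limit (~1000
# frames) bounds the list length it can handle; an explicit stack would remove that cap.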
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """
    Return the Liouville lambda of `number`: -1 when `number` has an odd number of
    prime factors (counted with multiplicity), and 1 when it has an even number.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
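
# A quick sanity check of the parity rule, computed by hand:
#   prime_factors(10) == [2, 5]     -> even count -> liouville_lambda(10) == 1
#   prime_factors(11) == [11]       -> odd count  -> liouville_lambda(11) == -1
#   prime_factors(12) == [2, 2, 3]  -> odd count  -> liouville_lambda(12) == -1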
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check without Counter: build the character frequency table by hand."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations on the given string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
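
# Worked example of the parity rule: "aabbc" has counts {a: 2, b: 2, c: 1}; only one
# character occurs an odd number of times, so a palindrome such as "abcba" exists.
# For "abc" all three counts are odd, so no rearrangement can be a palindrome.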
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
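
# Note: `_compute` re-wraps the flat `references` into the nested SQuAD-style
# {"paragraphs": [{"qas": [...]}]} layout because the bundled evaluate() script walks
# that structure, while predictions are passed as a dict keyed by question id.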
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
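
# Worked example using the docstring data: "this is the prediction" vs reference
# "this is the reference" contributes S=1, D=0, I=0 over 4 reference words, and the
# second pair contributes 3 errors over 4 words, so WER = (1 + 3) / 8 = 0.5.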
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
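
# Note: full_loop drives a complete 10-step reverse-diffusion pass -- at every timestep
# the dummy model predicts the residual and scheduler.step() returns prev_sample -- so
# the mean-abs assertions above pin the scheduler's numerics to known-good values.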
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed)
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
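
# Example invocation (hypothetical argument values; any XNLI language code works):
#   python run_xnli.py --model_name_or_path bert-base-multilingual-cased --language de \
#       --train_language en --do_train --do_eval --output_dir /tmp/debug_xnli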
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Compare characters pairwise, walking inward from both ends."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
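
# The timings above are sample numbers: slicing wins because s[::-1] and == both run in
# C, while the traversal and recursive variants pay per-character Python bytecode and,
# for the recursive one, an extra function call (plus a string copy) per step.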
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
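
# Example invocation (hypothetical flag values; the accepted flags are defined by
# InitializationArguments in arguments.py):
#   python initialize_model.py --config_name gpt2-large --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model --push_to_hub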
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
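
# This vectorizes all pairwise pixel/cluster distances via the expansion
# ||a - b||^2 = ||a||^2 - 2 * a.b + ||b||^2, using one matmul instead of Python loops.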
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an ImageGPT image processor: resizes images to a fixed resolution,
    normalizes them, and optionally color-quantizes pixels to the nearest cluster.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
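
# After color quantization each image is a flat sequence of cluster indices -- e.g. a
# 32x32 input becomes 1024 "input_ids" in [0, len(clusters)) -- the token format ImageGPT consumes.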
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
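

# Quick usage sketch (added; not part of the original module). It shows the
# fused past_key_values layout the ONNX config builds above: keys use
# (batch * n_head, head_dim, past_len) while values are transposed.
if __name__ == "__main__":
    config = BloomConfig(hidden_size=64, n_layer=2, n_head=8)
    batch, past_len = 2, 5
    head_dim = config.hidden_size // config.n_head
    print((batch * config.n_head, head_dim, past_len))  # key shape   -> (16, 8, 5)
    print((batch * config.n_head, past_len, head_dim))  # value shape -> (16, 5, 8)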
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_0_1_1_2_2)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_1_1_2_2)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model. So we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
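

# Minimal usage sketch (added; not part of the test module). Guarded so test
# collection is unaffected; the model name comes from the tests above.
if __name__ == "__main__":
    tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
    print(tok("Le transformeur est un modèle d'apprentissage profond.").input_ids)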
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path, pytorch_dump_folder_path):
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 10_24,
'''hidden_size''': 7_68,
'''max_length''': 5_12,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 10_24,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
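
# Example invocation (added sketch; the script filename is assumed and the
# paths are placeholders):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-converted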
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1_000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
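
# Worked example (added comment): a checkpoint key like "conv_1.conv.weight"
# becomes "mobilevit.conv_stem.convolution.weight" -- "conv_1." -> "conv_stem.",
# ".conv." -> ".convolution.", plus the "mobilevit." prefix for non-base models.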
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1_000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
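
# Example invocation (added sketch; the script filename is assumed and the
# paths are placeholders):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small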
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
Transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl,"
                " xlnet, xlm, lxmert, rembert]"
            )
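
# Example CLI usage (added sketch): the "convert" subcommand registered above
# is exposed through the transformers-cli entry point, e.g.
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin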
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
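
# Worked example (added comment): pick_layers_to_copy(n_student=3, n_teacher=12)
# looks up LAYERS_TO_COPY[12][3] and returns [0, 6, 11] -- first, middle and
# last teacher layer.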
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save.
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
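
# Usage sketch (added; the module filename "make_student.py" is assumed):
#
#   python make_student.py t5-small t5-small-1-1 --e 1 --d 1
#
# or directly from Python (downloads the teacher weights):
#
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "t5-small", save_path="t5-small-1-1", e=1, d=1
#   )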
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: re.search(r"<extra_id_\d+>", x) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
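

# Usage sketch (added; downloads the t5-small tokenizer on first run).
if __name__ == "__main__":
    tok = T5TokenizerFast.from_pretrained("t5-small")
    print(tok.get_sentinel_tokens()[:3])                 # three <extra_id_N> sentinels (set order is arbitrary)
    print(tok.build_inputs_with_special_tokens([8, 9]))  # appends </s> (id 1) -> [8, 9, 1]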
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
import faiss
a__ = self._create_dummy_dataset()
a__ = dset.map(
lambda __snake_case ,__snake_case : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=__snake_case ,keep_in_memory=__snake_case )
a__ = dset.add_faiss_index('vecs' ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT )
a__ , a__ = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
dset.drop_index('vecs' )
def lowerCamelCase__( self :Any ) -> str:
import faiss
a__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
a__ , a__ = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def lowerCamelCase__( self :int ) -> List[Any]:
import faiss
a__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
dset.save_faiss_index('vecs' ,tmp_file.name )
dset.load_faiss_index('vecs2' ,tmp_file.name )
os.unlink(tmp_file.name )
a__ , a__ = dset.get_nearest_examples('vecs2' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
a__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__snake_case ,partial(dset.get_nearest_examples ,'vecs2' ,np.ones(5 ,dtype=np.floataa ) ) )
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
a__ = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
a__ = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
a__ = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
a__ = Elasticsearch()
dset.add_elasticsearch_index('filename' ,es_client=__snake_case )
a__ , a__ = dset.get_nearest_examples('filename' ,'my_name-train_29' )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
@require_faiss
class FaissIndexTest(TestCase):
def lowerCamelCase__( self :str ) -> Union[str, Any]:
import faiss
a__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
a__ = np.zeros(5 ,dtype=np.floataa )
a__ = 1
a__ , a__ = index.search(__snake_case )
self.assertRaises(__snake_case ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
a__ = np.eye(5 ,dtype=np.floataa )[::-1]
a__ , a__ = index.search_batch(__snake_case )
self.assertRaises(__snake_case ,index.search_batch ,queries[0] )
a__ = [scores[0] for scores in total_scores]
a__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
import faiss
a__ = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
a__ = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(__snake_case ):
a__ = FaissIndex(string_factory='Flat' ,custom_index=faiss.IndexFlat(5 ) )
def lowerCamelCase__( self :Any ) -> Union[str, Any]:
import faiss
a__ = faiss.IndexFlat(5 )
a__ = FaissIndex(custom_index=__snake_case )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def lowerCamelCase__( self :int ) -> Optional[Any]:
import faiss
a__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
index.save(tmp_file.name )
a__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a__ = np.zeros(5 ,dtype=np.floataa )
a__ = 1
a__ , a__ = index.search(__snake_case )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[0] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
a__ = Elasticsearch()
a__ = {'acknowledged': True}
a__ = ElasticSearchIndex(es_client=__snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
a__ = 'foo'
a__ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
a__ , a__ = index.search(__snake_case )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
a__ = 'foo'
a__ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
a__ , a__ = index.search(__snake_case ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
a__ = ['foo', 'bar', 'foobar']
a__ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
a__ , a__ = index.search_batch(__snake_case )
a__ = [scores[0] for scores in total_scores]
a__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) ,0 )
self.assertListEqual([1, 1, 1] ,__snake_case )
# batched queries with timeout
a__ = ['foo', 'bar', 'foobar']
a__ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
a__ , a__ = index.search_batch(__snake_case ,request_timeout=30 )
a__ = [scores[0] for scores in total_scores]
a__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) ,0 )
self.assertListEqual([1, 1, 1] ,__snake_case )
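
# Illustrative sketch (not part of the test suite): against a real cluster the same
# index is driven like this. The URL and index name are placeholders, not values
# used anywhere in this repository.
def _elasticsearch_usage_sketch():
    from elasticsearch import Elasticsearch

    es_client = Elasticsearch("http://localhost:9200")  # hypothetical local cluster
    index = ElasticSearchIndex(es_client=es_client, es_index_name="docs")
    index.add_documents(["foo", "bar", "foobar"])
    scores, indices = index.search("foo")
    return scores, indices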
| 657
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
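    # Worked check: a 90 degree arc of a circle with radius 10 is a quarter of the
    # circumference 2*pi*10, i.e. exactly 5*pi ~= 15.70796.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-12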
| 657
| 1
|
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of `nums` whose elements sum to `max_sum`."""
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Explore the state space tree depth-first, pruning branches whose running sum
    already exceeds `max_sum` or can no longer reach it with the remaining numbers.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
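# With nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the search finds the subsets in
# index order, so the line above prints: [3, 4, 2] [4, 5]
assert result == [[3, 4, 2], [4, 5]]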
| 645
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
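
# Illustrative check of the sparse-layer spacing computed above (kept as a comment
# so the module has no import-time side effects):
#
#     config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#     assert config.encoder_sparse_step == 4  # every 4th encoder layer is a sparse MoE layer
#     assert config.decoder_sparse_step == 4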
| 419
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio with Whisper and use the transcription as the prompt
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
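
# Illustrative wiring sketch (kept as a comment; the checkpoint ids are examples,
# not values mandated by this module -- any Whisper checkpoint plus Stable
# Diffusion components should work):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#         speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#     )
#     image = pipe(audio=waveform, sampling_rate=16_000).images[0]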
| 704
|
def solution(n: int = 1000) -> int:
    """
    Return the largest product a * b * c of a Pythagorean triplet
    (a**2 + b**2 == c**2) for which a + b + c == n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
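    # Worked check: for n = 1000 the only Pythagorean triplet with a + b + c = 1000
    # is (200, 375, 425), so solution() returns 200 * 375 * 425 = 31875000.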
| 357
| 0
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
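
# Expected behaviour of the demo above: the heap orders nodes by value, so R(-1)
# starts at the root; after decrease_key(b, -17), B(-17) sifts up to the root.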
| 21
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
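
# Illustrative usage (kept as a comment so the module has no import-time side
# effects): the defaults above describe an EfficientFormer-L1 style model.
#
#     config = EfficientFormerConfig(image_size=224)
#     assert config.hidden_sizes == [48, 96, 224, 448]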
| 641
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 717
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_iterable():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
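
# Minimal standalone sketch of the API under test (kept as a comment; assumes
# `joblibspark` is installed and a Spark session is available):
#
#     with parallel_backend("spark"):
#         out = map_nested(add_one, {"a": [1, 2], "b": [3, 4]}, num_proc=2)
#     # out == {"a": [2, 3], "b": [4, 5]}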
| 680
| 0
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour of the current vertex already use this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
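
if __name__ == "__main__":
    # Worked example: a triangle (K3, given as an adjacency matrix) is
    # 3-colorable but not 2-colorable.
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # [] -- no valid 2-coloring exists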
| 418
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 418
| 1
|
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 1
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
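
# Illustrative launch commands (the script name and data path are placeholders):
#
#     accelerate launch complete_cv_example.py --data_dir images
#     accelerate launch complete_cv_example.py --data_dir images \
#         --checkpointing_steps epoch --output_dir checkpoints --with_tracking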
| 1
| 1
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number

    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'''{solution() = }''')
| 585
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
#                  stream_logs=True)
| 282
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; reload it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
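# Usage sketch (added; illustrative — requires a real SentencePiece model file on disk):
#
#   tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#   ids = tokenizer("Hello world")["input_ids"]
#   tokenizer.decode(ids)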
| 449
|
'''simple docstring'''
# Imports
import numpy as np
class IndexCalculation:
    """Compute common vegetation indices (NDVI, EVI, ...) from band matrices."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvi2,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvi2(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
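# Illustrative usage (added; tiny synthetic two-pixel bands stand in for real imagery):
if __name__ == "__main__":
    nir = np.array([[1.0, 2.0]])
    red = np.array([[0.5, 1.0]])
    cl = IndexCalculation(nir=nir, red=red)
    print(cl.calculation("NDVI", nir=nir, red=red))  # [[0.33333333 0.33333333]]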
| 449
| 1
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a snippet's tokens, or None if it has too few tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split code on non-alphanumeric characters and drop empty tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a new key into the LSH index, clustering it with its near-duplicates."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
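# Illustrative check (added): with NON_ALPHA tokenisation the two snippets below share
# the tokens {a, b, 1} out of {a, b, 1, 2, 3}, giving a Jaccard similarity of 3/5.
assert jaccard_similarity("a = 1\nb = 2", "a = 1\nb = 3") == 3 / 5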
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files in a cluster."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 108
|
"""
Project Euler Problem 87: https://projecteuler.net/problem=87
How many numbers below fifty million can be expressed as the sum of a prime
square, prime cube and prime fourth power?
"""


def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes up to the largest prime whose square can appear.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
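# Illustrative check (added): below 50 exactly four numbers are expressible as a
# prime square plus prime cube plus prime fourth power: 28, 33, 47 and 49.
# (The early-exit loops above assume CPython's ascending iteration over small-int sets.)
assert solution(50) == 4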
if __name__ == "__main__":
print(f'''{solution() = }''')
| 267
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 715
|
"""Surface area and volume of a regular dodecahedron."""


def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume: ((15 + 7 * sqrt(5)) / 4) * edge**3."""
    if edge <= 0 or not isinstance(edge, int):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
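# Illustrative values (added; edge length 5, rounded to four decimals):
#   surface area = 3 * sqrt(25 + 10 * sqrt(5)) * 5**2 ≈ 516.1432
#   volume       = (15 + 7 * sqrt(5)) / 4 * 5**3      ≈ 957.8899
assert abs(dodecahedron_surface_area(5) - 516.1432) < 1e-3
assert abs(dodecahedron_volume(5) - 957.8899) < 1e-3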
if __name__ == "__main__":
import doctest
doctest.testmod()
| 411
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 452
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_missing(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 452
| 1
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)

        x = self.dropout_pre(x)

        # invert the attention mask and extend it for the encoder blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 703
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
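# Illustrative usage (added; run from an environment where `transformers` is
# installed, since this module itself uses relative imports):
#
#   from transformers import BigBirdConfig
#   config = BigBirdConfig()
#   assert config.model_type == "big_bird" and config.attention_type == "block_sparse"
#   print(config.hidden_size)  # 768 by default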
| 378
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 683
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
    705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
    1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
    4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25,
    26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
    63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
    893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
    3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
    7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
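# Illustrative usage (added; the `hidden_size` attribute is aliased to `d_model`
# through `attribute_map`, so both names read the same value):
#
#   from transformers import WhisperConfig
#   config = WhisperConfig()
#   assert config.hidden_size == config.d_model == 256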
| 683
| 1
|
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 7
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository (legacy)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 7
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    """Permute a patch-merging reduction weight from the original Swin unfold channel order."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    """Permute a patch-merging norm vector from the original Swin unfold channel order."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
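# A minimal sanity-check sketch (added for illustration; this helper is
# hypothetical and never called by the conversion script): the reverse_*
# helpers invert the correct_* permutation, so composing them round-trips.
def _demo_unfold_roundtrip():
    t = torch.arange(16.0).reshape(2, 8)  # (out_channel=2, in_channel=8)
    assert torch.equal(
        reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(t)), t
    )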
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation UperNet + Swin checkpoint into the HuggingFace format."""
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
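    # Example invocation (hypothetical script name and output path, shown for
    # illustration only):
    #   python convert_upernet_swin_to_pytorch.py --model_name upernet-swin-tiny \
    #       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub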
| 168
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's magnitude response by FFT-ing its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase response by FFT-ing its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
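# A minimal filter satisfying the FilterType protocol (illustrative only; any
# object with a matching `process` method, e.g. an IIR biquad, works the same):
#
#     class IdentityFilter:
#         def process(self, sample: float) -> float:
#             return sample
#
#     show_frequency_response(IdentityFilter(), 48000)
#     show_phase_response(IdentityFilter(), 48000)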
| 671
| 0
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
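# Usage sketch (hypothetical file paths, for illustration only):
#   vocab, raw_vocab, ids_to_tokens, emoji = load_vocab_and_emoji("vocab.txt", "emoji.json")
# A vocab line may hold several comma-separated surface forms that share one id,
# which is why `raw_vocab` keeps the joined line while `vocab` maps each form.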
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji )
    @property
    def vocab_size(self):
        # self.vocab supports character fluctuations unique to Japanese, so it is larger than raw_vocab
        return len(self.raw_vocab)
    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens into a single string."""
        out_string = ''.join(tokens).strip()
        return out_string
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(r'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            r'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            r'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content
def A_ ( self : Any , snake_case : int , snake_case : Any=False ) -> Any:
'''simple docstring'''
A = text.replace(' ' , '<SP>' )
A = text.replace(' ' , '<SP>' )
A = text.replace('\r\n' , '<BR>' )
A = text.replace('\n' , '<BR>' )
A = text.replace('\r' , '<BR>' )
A = text.replace('\t' , '<TAB>' )
A = text.replace('—' , 'ー' )
A = text.replace('−' , 'ー' )
for k, v in self.emoji["emoji"].items():
if k in text:
A = text.replace(snake_case , snake_case )
if clean:
A = self.clean_text(snake_case )
def check_simbol(snake_case : Union[str, Any] ):
A = x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 2:
A = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(snake_case : List[Any] ):
A = x.encode()
if len(snake_case ) == 1 and len(snake_case ) == 3:
A = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
A = 0
A = []
while pos < len(snake_case ):
A = min(len(snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
A = [] # (token_id, token, pos)
for e in range(snake_case , snake_case , -1 ):
A = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(snake_case ) > 2:
A = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(snake_case ) > 0:
# the smallest token_id is adopted
A , A , A = sorted(snake_case , key=lambda snake_case : x[0] )[0]
result.append(snake_case )
A = e
else:
A = pos + 1
A = text[pos:end]
if check_simbol(snake_case ):
result.append('<KIGOU>' )
elif checkuae(snake_case ):
result.append('<U2000U2BFF>' )
else:
for i in wd.encode('utf-8' ):
result.append('<|byte%d|>' % i )
A = end
return result
def A_ ( self : List[str] , snake_case : Optional[Any] , snake_case : Optional[Any]="\n" ) -> List[Any]:
'''simple docstring'''
A = []
A = []
A = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' , errors='replace' ) )
A = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word] )
elif word == "<SP>":
words.append(' ' )
elif word == "<BR>":
words.append(snake_case )
elif word == "<TAB>":
words.append('\t' )
elif word == "<BLOCK>":
words.append('▀' )
elif word == "<KIGOU>":
words.append('ǀ' )
elif word == "<U2000U2BFF>":
words.append('‖' )
else:
words.append(snake_case )
if len(snake_case ) > 0:
words.append(bytearray(snake_case ).decode('utf-8' , errors='replace' ) )
A = ''.join(snake_case )
return text
| 109
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
def A_ ( self : Dict ) -> Dict:
'''simple docstring'''
A = tempfile.mkdtemp()
A = BlipImageProcessor()
A = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
A = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
A = InstructBlipProcessor(snake_case , snake_case , snake_case )
processor.save_pretrained(self.tmpdirname )
def A_ ( self : List[str] , **snake_case : str ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).tokenizer
def A_ ( self : int , **snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor
def A_ ( self : Any , **snake_case : Union[str, Any] ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).qformer_tokenizer
def A_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
A = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
A = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
self.assertIsInstance(processor.qformer_tokenizer , snake_case )
def A_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = self.prepare_image_inputs()
A = image_processor(snake_case , return_tensors='np' )
A = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = 'lower newer'
A = processor(text=snake_case )
A = tokenizer(snake_case , return_token_type_ids=snake_case )
A = qformer_tokenizer(snake_case , return_token_type_ids=snake_case )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = 'lower newer'
A = self.prepare_image_inputs()
A = processor(text=snake_case , images=snake_case )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A = processor.batch_decode(snake_case )
A = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
A = self.get_image_processor()
A = self.get_tokenizer()
A = self.get_qformer_tokenizer()
A = InstructBlipProcessor(
tokenizer=snake_case , image_processor=snake_case , qformer_tokenizer=snake_case )
A = 'lower newer'
A = self.prepare_image_inputs()
A = processor(text=snake_case , images=snake_case )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 109
| 1
|
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR training data into an evaluation-set file and a gold-data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file", )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file", )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
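    # Example invocation (hypothetical script name, for illustration only):
    #   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
    #       --evaluation_set eval.questions --gold_data_path eval.gold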
| 86
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
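    # Note: `_LazyModule` defers the heavy torch/sentencepiece imports until one
    # of the symbols above is first accessed, while the TYPE_CHECKING branch
    # keeps static type checkers aware of the full public API.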
| 518
| 0
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer value found among `env_keys`, or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    """Read an environment variable as a boolean flag."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
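# Usage sketch (variable names are illustrative, not an established convention):
#   num_procs = get_int_from_env(["NPROC", "SLURM_NPROCS"], default=1)
#   debug = parse_flag_from_env("MY_TOOL_DEBUG", default=False)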
| 553
|
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]
def solution(limit: int = 1000000) -> int:
    """Return the sum of all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
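# Worked example: 585 = 0b1001001001 reads the same forwards and backwards in
# both bases, so it contributes to the total (this is Project Euler problem 36).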
| 553
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (IPNDMScheduler,)
_SCREAMING_SNAKE_CASE = (("""num_inference_steps""", 50),)
def A ( self : Optional[int] , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = {'num_train_timesteps': 1_0_0_0}
config.update(**UpperCamelCase__ )
return config
def A ( self : List[str] , UpperCamelCase__ : int=0 , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = dict(self.forward_default_kwargs )
UpperCamelCase = kwargs.pop('num_inference_steps' , UpperCamelCase__ )
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config(**UpperCamelCase__ )
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
UpperCamelCase = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
UpperCamelCase = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
UpperCamelCase = dummy_past_residuals[:]
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
UpperCamelCase = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
UpperCamelCase = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : Any , UpperCamelCase__ : str=0 , **UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = dict(self.forward_default_kwargs )
UpperCamelCase = kwargs.pop('num_inference_steps' , UpperCamelCase__ )
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase = dummy_past_residuals[:]
if time_step is None:
UpperCamelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
UpperCamelCase = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase = dummy_past_residuals[:]
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
UpperCamelCase = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
UpperCamelCase = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A ( self : int , **UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase = self.scheduler_classes[0]
UpperCamelCase = self.get_scheduler_config(**UpperCamelCase__ )
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
UpperCamelCase = 1_0
UpperCamelCase = self.dummy_model()
UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = dict(self.forward_default_kwargs )
UpperCamelCase = kwargs.pop('num_inference_steps' , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
UpperCamelCase = self.get_scheduler_config()
UpperCamelCase = scheduler_class(**UpperCamelCase__ )
UpperCamelCase = self.dummy_sample
UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , 'set_timesteps' ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , 'set_timesteps' ):
UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCamelCase = dummy_past_residuals[:]
UpperCamelCase = scheduler.timesteps[5]
UpperCamelCase = scheduler.timesteps[6]
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
UpperCamelCase = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A ( self : Any ):
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.full_loop()
UpperCamelCase = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
| 430
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"")
def sort_auto_mapping(fname, overwrite=False):
    """Sort the mappings of one auto file; return True if the file was (or would be) changed."""
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(R'^(\s*)\S', lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '('):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')'):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    """Apply `sort_auto_mapping` to every Python file in the auto module."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith('.py')]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            ' this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
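    # Example invocations (hypothetical script name, for illustration only):
    #   python sort_auto_mappings.py                # rewrite the auto files in place
    #   python sort_auto_mappings.py --check_only   # only report unsorted mappings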
| 430
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
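# Minimal two-stage usage sketch (checkpoint names are illustrative assumptions;
# Kandinsky first produces image embeddings with the prior pipeline, then
# decodes them with the main pipeline):
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")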
| 319
|
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Processor for Bark: wraps a text tokenizer plus optional preloaded speaker embeddings."""
    tokenizer_class = '''AutoTokenizer'''
    attributes = ['''tokenizer''']
    preset_shape = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }
    def __init__( self : Union[str, Any] ,tokenizer : Union[str, Any] ,speaker_embeddings : Dict=None ):
        '''simple docstring'''
        super().__init__(tokenizer )
        _a : List[str] = speaker_embeddings
        self.speaker_embeddings = speaker_embeddings
@classmethod
def __lowercase ( cls : Any ,_a : Optional[int] ,_a : Union[str, Any]="speaker_embeddings_path.json" ,**_a : Union[str, Any] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_a : Tuple = get_file_from_repo(
_a ,_a ,subfolder=kwargs.pop('subfolder' ,_a ) ,cache_dir=kwargs.pop('cache_dir' ,_a ) ,force_download=kwargs.pop('force_download' ,_a ) ,proxies=kwargs.pop('proxies' ,_a ) ,resume_download=kwargs.pop('resume_download' ,_a ) ,local_files_only=kwargs.pop('local_files_only' ,_a ) ,use_auth_token=kwargs.pop('use_auth_token' ,_a ) ,revision=kwargs.pop('revision' ,_a ) ,)
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(_a ,_a )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_a : List[Any] = None
else:
with open(_a ) as speaker_embeddings_json:
_a : List[str] = json.load(_a )
else:
_a : str = None
_a : Any = AutoTokenizer.from_pretrained(_a ,**_a )
return cls(tokenizer=_a ,speaker_embeddings=_a )
def __lowercase ( self : List[str] ,_a : Tuple ,_a : Any="speaker_embeddings_path.json" ,_a : Optional[int]="speaker_embeddings" ,_a : bool = False ,**_a : Optional[int] ,):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_a ,_a ,'v2' ) ,exist_ok=_a )
_a : Optional[Any] = {}
_a : List[str] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_a : Any = self._load_voice_preset(_a )
_a : Tuple = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,_a ,F"""{prompt_key}_{key}""" ) ,voice_preset[key] ,allow_pickle=_a ,)
_a : Dict = os.path.join(_a ,F"""{prompt_key}_{key}.npy""" )
_a : Any = tmp_dict
with open(os.path.join(_a ,_a ) ,'w' ) as fp:
json.dump(_a ,_a )
super().save_pretrained(_a ,_a ,**_a )
def __lowercase ( self : Tuple ,_a : str = None ,**_a : List[Any] ):
'''simple docstring'''
_a : Optional[Any] = self.speaker_embeddings[voice_preset]
_a : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_a : List[Any] = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,_a ) ,cache_dir=kwargs.pop('cache_dir' ,_a ) ,force_download=kwargs.pop('force_download' ,_a ) ,proxies=kwargs.pop('proxies' ,_a ) ,resume_download=kwargs.pop('resume_download' ,_a ) ,local_files_only=kwargs.pop('local_files_only' ,_a ) ,use_auth_token=kwargs.pop('use_auth_token' ,_a ) ,revision=kwargs.pop('revision' ,_a ) ,)
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_a : Tuple = np.load(_a )
return voice_preset_dict
def __lowercase ( self : List[Any] ,_a : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : Any ,_a : List[str]=None ,_a : Tuple=None ,_a : Tuple="pt" ,_a : Any=256 ,_a : Optional[Any]=False ,_a : List[str]=True ,_a : Optional[Any]=False ,**_a : Dict ,):
'''simple docstring'''
if voice_preset is not None and not isinstance(_a ,_a ):
if (
isinstance(_a ,_a )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_a : Union[str, Any] = self._load_voice_preset(_a )
else:
if isinstance(_a ,_a ) and not voice_preset.endswith('.npz' ):
_a : str = voice_preset + '.npz'
_a : Optional[int] = np.load(_a )
if voice_preset is not None:
self._validate_voice_preset_dict(_a ,**_a )
_a : List[str] = BatchFeature(data=_a ,tensor_type=_a )
_a : List[Any] = self.tokenizer(
_a ,return_tensors=_a ,padding='max_length' ,max_length=_a ,return_attention_mask=_a ,return_token_type_ids=_a ,add_special_tokens=_a ,**_a ,)
if voice_preset is not None:
_a : Dict = voice_preset
return encoded_text
| 319
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace('''stem.conv''', '''bit.embedder.convolution''')
    if "blocks" in name:
        name = name.replace('''blocks''', '''layers''')
    if "head.fc" in name:
        name = name.replace('''head.fc''', '''classifier.1''')
    if name.startswith('''norm'''):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='''pt''').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print('''Logits:''', logits[0, :3])
    print('''Predicted class:''', model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"""Pushing model {model_name} and processor to the hub""")
        model.push_to_hub(F"""ybelkada/{model_name}""")
        processor.push_to_hub(F"""ybelkada/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
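    # Example invocation (hypothetical script name, for illustration only):
    #   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
    #       --pytorch_dump_folder_path ./bit-50 --push_to_hub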
| 52
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''deit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('''tiny''' ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('''small''' ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('''base''' ):
        pass
    elif deit_name[4:].startswith('''large''' ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size )
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
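    # Example invocation (hypothetical script name, for illustration only):
    #   python convert_deit_timm_to_pytorch.py --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled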
| 571
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
A : Optional[Any] ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
A : Optional[Any] =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A : str ={
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
A : Dict ={
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
A : Optional[int] =tempfile.mkdtemp()
A : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A : str =os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '\n' )
# load decoder from hub
A : Any ='''hf-internal-testing/ngram-beam-search-decoder'''
    def get_tokenizer(self , **kwargs_init ):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_feature_extractor(self , **kwargs ):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
    def get_decoder(self , **kwargs ):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
A : Any =self.get_tokenizer()
A : Any =self.get_feature_extractor()
A : List[str] =self.get_decoder()
A : Dict =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
A : Union[str, Any] =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCamelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> str:
A : str =WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load the saved processor back with additional decoder kwargs and make sure they are applied
A : Tuple =WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str:
A : List[str] =self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(UpperCamelCase__ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=UpperCamelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : str =self.get_feature_extractor()
A : Dict =self.get_tokenizer()
A : str =self.get_decoder()
A : Optional[Any] =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A : Tuple =floats_list((3, 10_00) )
A : List[Any] =feature_extractor(UpperCamelCase__ , return_tensors='np' )
A : int =processor(UpperCamelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> int:
A : List[Any] =self.get_feature_extractor()
A : Optional[Any] =self.get_tokenizer()
A : List[Any] =self.get_decoder()
A : Dict =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A : Union[str, Any] ='''This is a test string'''
A : Optional[Any] =processor(text=UpperCamelCase__ )
A : str =tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self , shape=(2, 10, 16) , seed=77 ):
        # fixed seed keeps the dummy logits reproducible across the decode tests
        np.random.seed(seed )
        return np.random.rand(*shape )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
A : Optional[int] =self.get_feature_extractor()
A : Dict =self.get_tokenizer()
A : Optional[Any] =self.get_decoder()
A : int =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A : Any =self._get_dummy_logits(shape=(10, 16) , seed=13 )
A : str =processor.decode(UpperCamelCase__ )
A : List[Any] =decoder.decode_beams(UpperCamelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> str:
A : int =self.get_feature_extractor()
A : int =self.get_tokenizer()
A : int =self.get_decoder()
A : str =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A : Optional[Any] =self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A : Tuple =processor.batch_decode(UpperCamelCase__ )
else:
with get_context(UpperCamelCase__ ).Pool() as pool:
A : List[Any] =processor.batch_decode(UpperCamelCase__ , UpperCamelCase__ )
A : Tuple =list(UpperCamelCase__ )
with get_context('fork' ).Pool() as p:
A : str =decoder.decode_beams_batch(UpperCamelCase__ , UpperCamelCase__ )
        texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(UpperCamelCase__ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(UpperCamelCase__ , decoded_processor.logit_score )
self.assertListEqual(UpperCamelCase__ , decoded_processor.lm_score )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]:
A : List[Any] =self.get_feature_extractor()
A : List[str] =self.get_tokenizer()
A : Dict =self.get_decoder()
A : Optional[Any] =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A : Optional[int] =self._get_dummy_logits()
A : Dict =15
A : Union[str, Any] =-20.0
A : List[Any] =-4.0
A : List[Any] =processor.batch_decode(
UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , )
A : Optional[Any] =decoded_processor_out.text
A : str =list(UpperCamelCase__ )
with get_context('fork' ).Pool() as pool:
A : Dict =decoder.decode_beams_batch(
UpperCamelCase__ , UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , )
A : List[str] =[d[0][0] for d in decoded_decoder_out]
A : Any =[d[0][2] for d in decoded_decoder_out]
A : List[Any] =[d[0][3] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , UpperCamelCase__ )
self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , UpperCamelCase__ , atol=1e-3 ) )
self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , UpperCamelCase__ , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]:
A : Optional[int] =self.get_feature_extractor()
A : Tuple =self.get_tokenizer()
A : Optional[int] =self.get_decoder()
A : List[Any] =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
A : int =self._get_dummy_logits()
A : Optional[Any] =2.0
A : Union[str, Any] =5.0
A : Tuple =-20.0
A : Tuple =True
A : int =processor.batch_decode(
UpperCamelCase__ , alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , )
A : Any =decoded_processor_out.text
A : int =list(UpperCamelCase__ )
decoder.reset_params(
alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , )
with get_context('fork' ).Pool() as pool:
A : Optional[int] =decoder.decode_beams_batch(
UpperCamelCase__ , UpperCamelCase__ , )
A : List[str] =[d[0][0] for d in decoded_decoder_out]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , UpperCamelCase__ )
A : Optional[int] =processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Union[str, Any]:
A : str =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
A : Any =processor.decoder.model_container[processor.decoder._model_key]
A : Optional[Any] =Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
A : Optional[Any] =os.listdir(UpperCamelCase__ )
A : Tuple =['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict:
A : Optional[Any] =snapshot_download('hf-internal-testing/processor_with_lm' )
A : Dict =WavaVecaProcessorWithLM.from_pretrained(UpperCamelCase__ )
A : List[str] =processor.decoder.model_container[processor.decoder._model_key]
A : int =Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
A : Tuple =os.listdir(UpperCamelCase__ )
A : int =os.listdir(UpperCamelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files downloaded from the hub and the local files in the cache are the same
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Dict:
A : Dict =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
A : Any =AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
A : Dict =floats_list((3, 10_00) )
A : List[Any] =processor_wavaveca(UpperCamelCase__ , return_tensors='np' )
A : Tuple =processor_auto(UpperCamelCase__ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
A : int =self._get_dummy_logits()
A : str =processor_wavaveca.batch_decode(UpperCamelCase__ )
A : Union[str, Any] =processor_auto.batch_decode(UpperCamelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
A : Any =self.get_feature_extractor()
A : List[str] =self.get_tokenizer()
A : int =self.get_decoder()
A : List[Any] =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets(offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
A : str =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
A : str =self._get_dummy_logits()[0]
A : str =processor.decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
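        # note: the offsets are expressed in logit-frame indices, not seconds; see the
        # slow integration test below for the frame -> time conversion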
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Union[str, Any]:
A : Union[str, Any] =WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
A : Optional[int] =self._get_dummy_logits()
A : Any =processor.batch_decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(UpperCamelCase__ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
import torch
A : Any =load_dataset('common_voice' , 'en' , split='train' , streaming=UpperCamelCase__ )
A : str =ds.cast_column('audio' , datasets.Audio(sampling_rate=1_60_00 ) )
A : int =iter(UpperCamelCase__ )
A : Dict =next(UpperCamelCase__ )
A : str =AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
A : Optional[Any] =WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A : Union[str, Any] =processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
A : Union[str, Any] =model(UpperCamelCase__ ).logits.cpu().numpy()
A : Tuple =processor.decode(logits[0] , output_word_offsets=UpperCamelCase__ )
A : str =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
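        # inputs_to_logits_ratio is 320 for this architecture, so at 16 kHz each logit
        # frame covers 320 / 16_000 = 0.02 s of audio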
A : List[Any] =[
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
A : Union[str, Any] ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(' '.join(self.get_from_offsets(UpperCamelCase__ , 'word' ) ) , UpperCamelCase__ )
self.assertEqual(' '.join(self.get_from_offsets(UpperCamelCase__ , 'word' ) ) , output.text )
# output times
A : List[str] =torch.tensor(self.get_from_offsets(UpperCamelCase__ , 'start_time' ) )
A : Tuple =torch.tensor(self.get_from_offsets(UpperCamelCase__ , 'end_time' ) )
# fmt: off
A : str =torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
A : List[str] =torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.0_1 ) )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.0_1 ) )
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase : Any =logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments( TrainingArguments ):
    '''simple docstring'''
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to SortishSamler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor : bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout : Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler : Optional[str] = field(
        default="linear" , metadata={"help": f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
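    # the file lock serializes the download so concurrent processes don't race on the
    # shared nltk data directory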
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def UpperCamelCase__ ( __magic_name__ : str ) -> str:
'''simple docstring'''
re.sub("""<n>""" , """""" , __magic_name__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__magic_name__ ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Any = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''fnet'''
    def __init__( self , vocab_size=32000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
class UpperCamelCase__ :
    def __init__( self , name , value , weight ):
        '''simple docstring'''
        self.name = name
        self.value = value
        self.weight = weight
def __repr__( self : Tuple ):
'''simple docstring'''
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value( self ):
        '''simple docstring'''
        return self.value
    def get_name( self ):
        '''simple docstring'''
        return self.name
    def get_weight( self ):
        '''simple docstring'''
        return self.weight
    def value_weight( self ):
        '''simple docstring'''
        return self.value / self.weight
def build_menu(name , value , weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(item , max_cost , key_func ):
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
for i in range(len(__lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
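# worked example: with values=[30, 10], weights=[10, 5], max_cost=12 and
# key_func=Things.get_value, greedy takes the 30-value item (weight 10), skips the
# 10-value item (10 + 5 > 12) and returns a total_value of 30.0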
def UpperCAmelCase_ ( ) -> List[str]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
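# MODEL_TYPES feeds the --model_type help text in ModelArguments below when training
# a masked-image-modeling model from scratch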
@dataclass
class DataTrainingArguments:
    dataset_name : Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name : Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name : Optional[str] = field(
        default=None , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    train_dir : Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir : Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split : Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    mask_patch_size : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
    mask_ratio : float = field(
        default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
    max_train_samples : Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples : Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files['''train'''] = self.train_dir
        if self.validation_dir is not None:
            data_files['''validation'''] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path : str = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type : Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name_or_path : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    config_overrides : Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    model_revision : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name : str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token : bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    image_size : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    patch_size : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    encoder_stride : Optional[int] = field(
        default=None , metadata={'help': 'Stride to use for the encoder.'} , )
class MaskGenerator:
    def __init__( self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        '''simple docstring'''
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
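        # with the defaults (192, 32, 4, 0.6): rand_size = 6, scale = 8,
        # token_count = 36 and mask_count = ceil(36 * 0.6) = 22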
def __call__( self : int ):
'''simple docstring'''
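        # draw mask_count random patch positions, then upsample the coarse patch mask
        # to the model's token grid by repeating it along both axes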
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
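# the collator below stacks the per-example tensors into batch tensors; SimMIM-style
# models consume the boolean patch mask under the "bool_masked_pos" key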
def collate_fn(examples ):
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    mask = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split )
lowercase_ = split["""train"""]
lowercase_ = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(UpperCAmelCase__ , """decoder_type""" ):
lowercase_ = """simmim"""
# adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
    mask_generator = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(examples ):
        examples["""pixel_values"""] = [transforms(image ) for image in examples[image_column_name]]
        examples["""mask"""] = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase__ )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """masked-image-modeling""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-image-modeling"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
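# _import_structure is handed to _LazyModule at the bottom of the file, so the heavy
# torch/TF submodules are only imported when one of their attributes is first accessed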
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ["""MobileViTFeatureExtractor"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilevit"""] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilevit"""] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file , emoji_file ):
    '''Loads a vocabulary file and an emoji file into dictionaries.
    Each vocab line is either a single token or a comma-separated list of surface
    variants that all map to the same id.'''
    with open(emoji_file , '''r''' , encoding='''utf-8''' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , '''r''' , encoding='''utf-8''' ) as f:
        token = f.readlines()
    token = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[''','''.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F"""Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def lowercase__ ( self : int ):
'''simple docstring'''
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return dict(self.raw_vocab, **self.added_tokens_encoder )
def lowercase__ ( self : str, lowerCamelCase : Any ):
'''simple docstring'''
return self.subword_tokenizer.tokenize(lowerCamelCase, clean=self.do_clean_text )
def lowercase__ ( self : List[Any], lowerCamelCase : List[str] ):
'''simple docstring'''
return self.vocab.get(lowerCamelCase, self.vocab.get(self.unk_token ) )
def lowercase__ ( self : Dict, lowerCamelCase : Any ):
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(lowerCamelCase )
def lowercase__ ( self : Dict, lowerCamelCase : List[Any] ):
'''simple docstring'''
        out_string = ''''''.join(lowerCamelCase ).strip()
return out_string
def lowercase__ ( self : Any, lowerCamelCase : "Conversation" ):
'''simple docstring'''
        input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase, add_special_tokens=lowerCamelCase ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
        else:
            vocab_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            emoji_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(vocab_file, '''w''', encoding='''utf-8''' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(''','''.join(token ) + '''\n''' )
                index += 1
        with open(emoji_file, '''w''', encoding='''utf-8''' ) as writer:
            json.dump(self.emoji, writer )
        return vocab_file, emoji_file
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
    def __init__( self , vocab , ids_to_tokens , emoji ):
        '''simple docstring'''
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
        self.content_repatter2 = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
        self.content_repatter3 = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
        self.content_repatter4 = re.compile(
            R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter5 = re.compile(
            R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter6 = re.compile(
            R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.ids_to_tokens )
    def clean_text(self , content ):
        '''simple docstring'''
        content = self.content_repatter1.sub('''<URL>''', content )
        content = self.content_repatter2.sub('''<EMAIL>''', content )
        content = self.content_repatter3.sub('''<TEL>''', content )
        content = self.content_repatter4.sub('''<DATE>''', content )
        content = self.content_repatter5.sub('''<DATE>''', content )
        content = self.content_repatter6.sub('''<PRICE>''', content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('''<BLOCK><BLOCK>''', '''<BLOCK>''' )
        return content
    def tokenize(self , text , clean=False ):
        '''simple docstring'''
        text = text.replace(''' ''', '''<SP>''' )
        text = text.replace('''　''', '''<SP>''' )
        text = text.replace('''\r\n''', '''<BR>''' )
        text = text.replace('''\n''', '''<BR>''' )
        text = text.replace('''\r''', '''<BR>''' )
        text = text.replace('''\t''', '''<TAB>''' )
        text = text.replace('''—''', '''ー''' )
        text = text.replace('''−''', '''ー''' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC_2_A_1 and c <= 0xC_2_B_F)
or (c >= 0xC_7_8_0 and c <= 0xC_7_8_3)
or (c >= 0xC_A_B_9 and c <= 0xC_B_B_F)
or (c >= 0xC_C_8_0 and c <= 0xC_D_A_2)
):
return True
return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE_2_8_0_8_0 and c <= 0xE_2_B_0_7_F:
return True
return False
        pos = 0
        result = []
        while pos < len(text ):
            # greedy longest-match: special tokens may be up to maxlen long, plain text at most 3 chars
            end = min(len(text ), pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end, pos, -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates, key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('''<KIGOU>''' )
                elif checkuae(wd ):
                    result.append('''<U2000U2BFF>''' )
                else:
                    for i in wd.encode('''utf-8''' ):
                        result.append('''<|byte%d|>''' % i )
                pos = end
        return result
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[Any], lowerCamelCase : Dict="\n" ):
'''simple docstring'''
lowercase__ = []
lowercase__ = []
lowercase__ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCamelCase ) > 0:
words.append(bytearray(lowerCamelCase ).decode('''utf-8''', errors='''replace''' ) )
lowercase__ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['''emoji_inv'''][word] )
elif word == "<SP>":
words.append(''' ''' )
elif word == "<BR>":
words.append(lowerCamelCase )
elif word == "<TAB>":
words.append('''\t''' )
elif word == "<BLOCK>":
words.append('''▀''' )
elif word == "<KIGOU>":
words.append('''ǀ''' )
elif word == "<U2000U2BFF>":
words.append('''‖''' )
else:
words.append(lowerCamelCase )
if len(lowerCamelCase ) > 0:
words.append(bytearray(lowerCamelCase ).decode('''utf-8''', errors='''replace''' ) )
lowercase__ = ''''''.join(lowerCamelCase )
return text
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int = 1_3 , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , __UpperCAmelCase : int = 1_2_8 , __UpperCAmelCase : Optional[int]=[1_6, 3_2, 6_4, 1_2_8] , __UpperCAmelCase : int = 7 , __UpperCAmelCase : int = 4 , __UpperCAmelCase : int = 3_7 , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 1_0 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1_2_8 , __UpperCAmelCase : List[int] = [2, 2, 2, 2] , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = encoder_stride
SCREAMING_SNAKE_CASE__ = num_attention_outputs
SCREAMING_SNAKE_CASE__ = embed_dim
SCREAMING_SNAKE_CASE__ = embed_dim + 1
SCREAMING_SNAKE_CASE__ = resolution
SCREAMING_SNAKE_CASE__ = depths
SCREAMING_SNAKE_CASE__ = hidden_sizes
SCREAMING_SNAKE_CASE__ = dim
SCREAMING_SNAKE_CASE__ = mlp_expansion_ratio
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : str ) -> Tuple:
SCREAMING_SNAKE_CASE__ = TFEfficientFormerModel(config=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ = TFEfficientFormerForImageClassification(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = TFEfficientFormerForImageClassification(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 616
| 0
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from this node with both players playing perfectly."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
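

# Worked example for the recursion above (illustrative, not part of the original
# module): with scores = [3, 5, 2, 9] and height = 2, the maximizing root sees
#   max(min(3, 5), min(2, 9)) = max(3, 2) = 3,
# so minimax(0, 0, True, [3, 5, 2, 9], 2) returns 3.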
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 332
|
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        """Zero capacity and trivial single-item inputs should yield 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Items of weight 2 and 1 (values 2 and 3) fit into capacity 3."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """The classic capacity-50 example: take the 100- and 120-value items."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 332
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
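

# Illustrative usage (hypothetical values, not from the original file): the layer
# counts are exposed as a per-modality dict, so
#   LxmertConfig(l_layers=3, x_layers=2, r_layers=2).num_hidden_layers
# evaluates to {"vision": 2, "cross_encoder": 2, "language": 3}.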
| 444
|
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 444
| 1
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute gamma(num) for positive integers and half-integers via the recurrence."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
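

# Sanity checks for the recurrence gamma(n) = (n - 1) * gamma(n - 1) (illustrative only):
#   gamma(4)   ->  6.0  (i.e. 3!)
#   gamma(2.5) ->  1.5 * 0.5 * sqrt(pi)  ~ 1.3293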
def test_gamma() -> None:
    """Basic sanity checks for gamma()."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 19
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Check whether cp is the codepoint of a CJK character."""
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x2_0000 and cp <= 0x2_A6DF) #
or (cp >= 0x2_A700 and cp <= 0x2_B73F) #
or (cp >= 0x2_B740 and cp <= 0x2_B81F) #
or (cp >= 0x2_B820 and cp <= 0x2_CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2_F800 and cp <= 0x2_FA1F) #
): #
return True
return False
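

# For reference (illustrative): ord("中") == 0x4E2D falls inside the main CJK block
# 0x4E00-0x9FFF above, so _is_chinese_char(ord("中")) is True, while
# _is_chinese_char(ord("a")) is False.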
def is_chinese(word: str):
    """Return 1 if every character in `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: list):
    """Collect the multi-character Chinese words found in a token list."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    """Prefix non-initial characters of whole Chinese words with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span = min(end - start, max_word_len)
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
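

# Illustrative trace (hypothetical tokens, not in the original script): with
# chinese_word_set = {"天气"} and bert_tokens = ["天", "气", "好"], the loop matches
# the whole word "天气" and rewrites the list to ["天", "##气", "好"].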
def prepare_ref(lines: list, ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For every line, collect the positions of subword pieces that continue a Chinese whole word."""
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
_a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_a = parser.parse_args()
main(args)
| 19
| 1
|
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan Distance."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
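
    # For instance (illustrative): a node at (pos_x=1, pos_y=2) heading for a goal
    # at (goal_x=4, goal_y=6) gets f_cost = |1 - 4| + |2 - 6| = 7; greedy best-first
    # always expands the open node with the smallest such value.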
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """Greedy best-first search: always expand the open node with the lowest heuristic."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False
    def search(self) -> Path | None:
        """Search for the path until the target is reached or open_nodes is empty."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
__lowerCAmelCase : str = (0, 0)
__lowerCAmelCase : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__lowerCAmelCase : Optional[int] = GreedyBestFirst(init, goal)
__lowerCAmelCase : Tuple = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__lowerCAmelCase : Tuple = 2
for elem in grid:
print(elem)
| 158
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowerCAmelCase : Optional[int] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    """Return True when source is within 1% of target."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_dir):
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_info_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(datasets_info_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)

    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )

    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            result == expected
| 158
| 1
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Sort a list of integers with pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
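

# Worked example (illustrative): pigeon_sort([8, 3, 2, 7, 4]) builds holes over the
# value range 2..8 (holes_range = 7), drops each value into holes[i - 2], then reads
# the holes back in order -> [2, 3, 4, 7, 8].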
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 81
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
| 81
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 457
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 457
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
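

# Illustrative note (not part of the original config file, and assuming the
# defaults above): a 32-frame 224x224 clip is cut into
# (32 // 2) * (224 // 16) ** 2 = 3136 tubelet tokens before the [CLS] token
# is prepended by the embedding layer.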
| 17
|
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they contain the same letters, ignoring case.

    >>> check_anagrams("Silent", "Listen")
    True
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
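

# Illustrative: check_anagrams("Silent", "Listen") -> True because every letter's
# running count returns to 0, while check_anagrams("This is", "That is") -> False
# since the counts for "i"/"s" and "a"/"t" never cancel.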
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 206
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 2
|
"""simple docstring"""
def gray_code_sequence(bit_count: int) -> list:
    """
    >>> gray_code_sequence(2)
    [0, 1, 3, 2]
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
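

# Illustrative: gray_code_sequence_string(2) -> ["00", "01", "11", "10"], so
# gray_code_sequence(2) -> [0, 1, 3, 2]; consecutive entries differ in exactly
# one bit, which is the defining property of a Gray code.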
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2
| 1
|
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the falling product u * (u - 1) * ... * (u - (p - 1))."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
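

# For example (illustrative): ucal(1.5, 3) = 1.5 * 0.5 * (-0.5) = -0.375, the
# falling-factorial numerator of the third Newton forward-difference term.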
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 352
|
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
def lowerCAmelCase_ ( self : List[Any] ):
try:
AutoConfig.register("""custom""" , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
_UpperCAmelCase = CustomTokenizer.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase_ ( self : List[Any] ):
try:
AutoConfig.register("""custom""" , __lowerCAmelCase )
# Can register in two steps
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowerCAmelCase ):
AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json file.
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = BertTokenizerFast.from_pretrained(__lowerCAmelCase )
bert_tokenizer.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = CustomTokenizerFast.from_pretrained(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase_ ( self : List[str] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowerCAmelCase ):
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCAmelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCAmelCase_ ( self : List[str] ):
class a ( lowerCAmelCase_ ):
_snake_case : Tuple = False
class a ( lowerCAmelCase_ ):
_snake_case : Optional[int] = NewTokenizer
_snake_case : Any = False
try:
AutoConfig.register("""custom""" , __lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase )
AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_UpperCAmelCase = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase_ ( self : List[Any] ):
with self.assertRaisesRegex(
__lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ):
_UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCAmelCase_ ( self : int ):
with self.assertRaisesRegex(
__lowerCAmelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , revision="""aaaaaa""" )
def lowerCAmelCase_ ( self : List[str] ):
# Make sure we have cached the tokenizer.
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
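# A minimal sketch of the registration API exercised by the tests above; `MyConfig`
# and `MyTokenizer` are hypothetical names introduced purely for illustration:
#
#   from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   class MyTokenizer(PreTrainedTokenizer):
#       pass
#
#   AutoConfig.register("my-model", MyConfig)
#   AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)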
| 277
| 0
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Factory used by argparse to build a DownloadCommand from parsed arguments."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if it is already in the cache dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if"
                " you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
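# Example invocation (a sketch: assumes this command is wired into the
# `transformers-cli` entry point, and the model name is a placeholder):
#
#   transformers-cli download --cache-dir /tmp/models --force bert-base-uncased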
| 547
|
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is neither empty nor '#', record it.
                if name == "href" and value != "#" and value != "":
                    # Only keep links we have not seen before.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Return the registered domain, i.e. the last two labels of the host name."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full host name (netloc) of the URL."""
    return parse.urlparse(url).netloc
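# A quick sanity check for the URL helpers above (a sketch, to be run manually):
#
#   >>> get_sub_domain_name("https://docs.github.com/en")
#   'docs.github.com'
#   >>> get_domain_name("https://docs.github.com/en")
#   'github.com'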
def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the given URL and return a sorted list of the e-mail addresses found on it."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # Pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # Open each discovered link and scan its contents.
            try:
                read = requests.get(link)
                # Get the valid emails.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # The set takes care of duplicates.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 547
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
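# Usage sketch (assuming the standard `transformers` API for this model family):
#
#   from transformers import NatConfig, NatModel
#   config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
#   model = NatModel(config)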
| 94
|
from __future__ import annotations
from typing import Any
class Graph:
    """Weighted undirected graph with Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge between u_node and v_node with the given weight."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Return the representative of the component containing u_node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Flatten the component mapping after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merge the two components, attaching the smaller one to the larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Compute and print the minimum spanning tree using Boruvka's algorithm."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(F"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Placeholder kept from the original file; doctests would live here."""
if __name__ == "__main__":
import doctest
doctest.testmod()
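# Usage sketch for the Graph class above (edge weights are illustrative):
#
#   g = Graph(4)
#   g.add_edge(0, 1, 1)
#   g.add_edge(0, 2, 2)
#   g.add_edge(2, 3, 3)
#   g.boruvka()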
| 113
| 0
|
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True if the graph (given as an adjacency list) is bipartite."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        # Color vertex v with c and propagate the opposite color to its neighbours.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # The graph is bipartite iff no edge connects two same-colored vertices.
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
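# A triangle is the smallest non-bipartite graph (sketch):
#
#   >>> check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
#   False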
| 536
|
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
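# Usage sketch: a 440 Hz low-pass biquad at 48 kHz. The coefficient attribute
# names below follow audio_filters.iir_filter.IIRFilter as used above; verify
# them against that module:
#
#   filt = make_lowpass(440, 48000)
#   print(filt.a_coeffs, filt.b_coeffs)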
| 536
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    """Return a fresh file path inside a new temporary directory."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
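# Quick sketch of the simplest agent type, mirroring the test above:
#
#   text = AgentText("Hey!")
#   assert text.to_string() == text.to_raw() == "Hey!"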
| 460
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Tokenizer that runs pre-tokenization with the jieba segmentation tool over a SentencePiece vocabulary."""
def __init__(self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any=False , UpperCamelCase : Dict=True , UpperCamelCase : int=False , UpperCamelCase : Optional[Any]="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : Optional[int]="<unk>" , UpperCamelCase : List[Any]="<sep>" , UpperCamelCase : Dict="<pad>" , UpperCamelCase : int="<cls>" , UpperCamelCase : int="<mask>" , UpperCamelCase : str=["<eop>", "<eod>"] , UpperCamelCase : Optional[Dict[str, Any]] = None , **UpperCamelCase : Dict , ):
'''simple docstring'''
lowercase__ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , )
lowercase__ = 3
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
lowercase__ = jieba
lowercase__ = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
return len(self.sp_model )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__(self : Dict , UpperCamelCase : int ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Dict ):
'''simple docstring'''
if self.remove_space:
lowercase__ = ''' '''.join(inputs.strip().split() )
else:
lowercase__ = inputs
lowercase__ = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowercase__ = unicodedata.normalize('''NFKD''' , UpperCamelCase )
lowercase__ = ''''''.join([c for c in outputs if not unicodedata.combining(UpperCamelCase )] )
if self.do_lower_case:
lowercase__ = outputs.lower()
return outputs
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = self.preprocess_text(UpperCamelCase )
lowercase__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
lowercase__ = []
for piece in pieces:
if len(UpperCamelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowercase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase__ = cur_pieces[1:]
else:
lowercase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase )
else:
new_pieces.append(UpperCamelCase )
return new_pieces
def UpperCamelCase__ (self : int , UpperCamelCase : str ):
'''simple docstring'''
return self.sp_model.PieceToId(UpperCamelCase )
def UpperCamelCase__ (self : str , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.sp_model.IdToPiece(UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = ''''''.join(UpperCamelCase ).replace(UpperCamelCase , ''' ''' ).strip()
return out_string
def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1, 1]
return ([0] * len(UpperCamelCase )) + [1, 1]
def UpperCamelCase__ (self : str , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase__ (self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowercase__ = os.path.join(
UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
def UpperCamelCase__ (self : List[str] , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = super()._decode(*UpperCamelCase , **UpperCamelCase )
lowercase__ = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
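# Usage sketch (assuming the public checkpoint referenced in the vocab map above;
# the tokenizer additionally requires the `jieba` package):
#
#   from transformers import CpmTokenizer
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("你好,世界")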
| 460
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
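# Usage sketch (assuming the standard `transformers` API for this model):
#
#   from transformers import ASTConfig, ASTModel
#   config = ASTConfig(hidden_size=768, num_mel_bins=128, max_length=1024)
#   model = ASTModel(config)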
| 718
|
def nand_gate(input_1: int, input_2: int) -> int:
    """NAND gate: returns 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exercise all four input combinations."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 582
| 0
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_snake_case = logging.getLogger(__name__)
_snake_case = """Hello world! cécé herlolip"""
_snake_case = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def _A ( __magic_name__ , __magic_name__ ):
lowercase__ = BertAbsConfig(
temp_dir="." , finetune_bert=__magic_name__ , large=__magic_name__ , share_emb=__magic_name__ , use_bert_emb=__magic_name__ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
lowercase__ = torch.load(__magic_name__ , lambda __magic_name__ , __magic_name__ : storage )
lowercase__ = AbsSummarizer(__magic_name__ , torch.device("cpu" ) , __magic_name__ )
original.eval()
lowercase__ = BertAbsSummarizer(__magic_name__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
lowercase__ = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
lowercase__ = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__magic_name__ )) )
lowercase__ = torch.tensor(__magic_name__ ).unsqueeze(0 )
lowercase__ = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(__magic_name__ )) )
lowercase__ = torch.tensor(__magic_name__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowercase__ = encoder_input_ids
lowercase__ = decoder_input_ids
lowercase__ = lowercase__ = None
lowercase__ = None
lowercase__ = lowercase__ = None
lowercase__ = lowercase__ = None
lowercase__ = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowercase__ = original(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
lowercase__ = original.generator(__magic_name__ )
lowercase__ = new_model(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
lowercase__ = new_model.generator(__magic_name__ )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_snake_case = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
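# Example invocation (a sketch; the script name and paths are placeholders):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_checkpoint.pt \
#       --pytorch_dump_folder_path ./bertabs-converted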
| 655
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        # from_pt=True is an assumption here: the tiny test model ships PyTorch weights.
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
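# Usage sketch of the API exercised above (part of transformers.onnx):
#
#   from transformers.onnx import FeaturesManager
#   framework = FeaturesManager.determine_framework("bert-base-uncased")
#   print(framework)  # "pt" when PyTorch is available, "tf" otherwise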
| 655
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 702
|
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle string, list and dict forms of `data_files`."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
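# Usage sketch (the packaged "pandas" builder reads pickled DataFrames):
#
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files={"train": "train.pkl"})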
| 580
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
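# Usage sketch for the tool above (`image` would be a PIL.Image you provide;
# calling conventions follow the agents/tools framework):
#
#   tool = ImageSegmentationTool()
#   mask = tool(image, "cat")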
| 35
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
UpperCamelCase__ = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    """Shorthand for a dataclasses list field with a default factory."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""
UpperCAmelCase_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase_ = field(
default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase_ = field(
default=A_ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCAmelCase_ = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
UpperCAmelCase_ = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
UpperCAmelCase_ = field(
default=0.1 , metadata={
            '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
UpperCAmelCase_ = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in the feature extractor.'''} , )
UpperCAmelCase_ = field(
default=0.05 , metadata={
'''help''': (
            '''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
'''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
'''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
)
} , )
UpperCAmelCase_ = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and evaluation."""
UpperCAmelCase_ = field(
default=A_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCAmelCase_ = field(
default='''train+validation''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
UpperCAmelCase_ = field(
default=A_ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCAmelCase_ = field(
default=A_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCAmelCase_ = field(
default=A_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase_ = field(
default=A_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase_ = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the received inputs and labels."""

    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
def __call__( self : List[Any] , UpperCamelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ):
"""simple docstring"""
_lowercase : int = [{'''input_values''': feature['''input_values''']} for feature in features]
_lowercase : Dict = [{'''input_ids''': feature['''labels''']} for feature in features]
_lowercase : int = self.processor.pad(
UpperCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_lowercase : Union[str, Any] = self.processor.pad(
labels=UpperCamelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_lowercase : Optional[Any] = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_00 )
_lowercase : Optional[Any] = labels
return batch
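# A minimal usage sketch of the collator (names below are illustrative, not part of
# this script):
#
#   collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
#   batch = collator([{"input_values": [...], "labels": [...]}, ...])
#
# batch["input_values"] and batch["labels"] come back as padded tensors; padded
# label positions are set to -100 so the loss ignores them.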
class CTCTrainer(Trainer):
    '''simple docstring'''

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(
                    f"{model.module.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']"
                )

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
return loss.detach()
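    # Note on the division above: with gradient_accumulation_steps = k the loss is
    # scaled by 1/k, so accumulating k backward passes reproduces the gradient of
    # the mean loss over the effective (k-times larger) batch.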
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")
# Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])
    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names
    )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = WavaVecaCTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        batch.update(
            processor(audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0])
        )
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
return results
if __name__ == "__main__":
main()
| 322
| 0
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    '''simple docstring'''
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        from_logits = from_model(x)
        our_logits = our_model(x).logits

    assert torch.allclose(from_logits, our_logits), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    '''simple docstring'''
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
        help='''The name of the model you wish to convert; it must be one of the supported Levit* architectures.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 476
|
UpperCAmelCase_ = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
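# Each value above is a pip requirement specifier; for example
# "jax>=0.2.8,!=0.3.2,<=0.4.13" pins a version range while excluding a single
# known-bad release.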
| 476
| 1
|
'''simple docstring'''
import math
def proth(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # number of doubling blocks needed to reach the requested index
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 614
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCamelCase__ : Dict = logging.getLogger(__name__)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()
    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
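    # Design note: token ids fit in uint16 whenever vocab_size < 2**16, which halves
    # the size of the pickled dataset compared to int32.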
if __name__ == "__main__":
main()
| 614
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect the other qubits, collapsing
    # their superposition into the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
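# Expected behaviour (assuming an ideal, noise-free simulation): the circuit
# prepares a GHZ state, so the 1_000 shots split between the all-zeros and
# all-ones bitstrings, e.g. roughly {"000": 500, "111": 500} for 3 qubits.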
| 3
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''simple docstring'''
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
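# Sanity check: f(x) = x**3 - 2*x - 5 has a single real root near x ≈ 2.0945515,
# which is the value bisection(f, 1, 1_000) converges to within the 1e-7 tolerance.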
| 3
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
return log_spec
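    # The two steps above implement Whisper's log-mel convention: values more than
    # 8.0 (in log10 units) below the global max are clipped, and (x + 4) / 4 then
    # maps the spectrogram into roughly the [-1, 1] range.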
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
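    # Each sequence is normalized with the mean/variance of its unpadded prefix only
    # (length read off the attention mask), so padding values cannot skew the stats.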
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, sampling_rate: Optional[int] = None, do_normalize: Optional[bool] = None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 68
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 682
| 0
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream) -> int:
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
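# Worked example: _dense_to_one_hot(numpy.array([1, 0]), 3) returns
#   [[0., 1., 0.],
#    [1., 0., 0.]]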
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """simple docstring"""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """simple docstring"""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        """simple docstring"""
        return self._images

    @property
    def labels(self):
        """simple docstring"""
        return self._labels

    @property
    def num_examples(self):
        """simple docstring"""
        return self._num_examples

    @property
    def epochs_completed(self):
        """simple docstring"""
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
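# Illustrative sketch (not part of the original file; array sizes are hypothetical):
# the epoch-boundary branch above splices the tail of one epoch onto the head of
# the reshuffled next epoch, so a batch never comes up short.
#
#     ds = _DataSet(images , labels , reshape=False , seed=42 )   # e.g. 55000 examples
#     xs, ys = ds.next_batch(100 )
#     assert xs.shape[0] == 100   # holds even when the epoch wraps around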
@deprecated(A__ , "Please write your own downloading logic." )
def __a ( A__ , A__ , A__ ) -> Optional[Any]:
if not gfile.Exists(A__ ):
gfile.MakeDirs(A__ )
lowerCAmelCase = os.path.join(A__ , A__ )
if not gfile.Exists(A__ ):
urllib.request.urlretrieve(A__ , A__ ) # noqa: S310
with gfile.GFile(A__ ) as f:
lowerCAmelCase = f.size()
print("Successfully downloaded" , A__ , A__ , "bytes." )
return filepath
@deprecated(
    None , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def read_data_sets( train_dir , fake_data=False , one_hot=False , dtype=dtypes.floataa , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , "rb" ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , "rb" ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , "rb" ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , "rb" ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images )}. Received: {validation_size}."
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
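# A minimal usage sketch (hedged: the download directory is an assumption, and
# this reader is deprecated in favour of tensorflow_datasets.load('mnist')):
#
#     mnist = read_data_sets("/tmp/mnist_data" , one_hot=True )
#     batch_images, batch_labels = mnist.train.next_batch(100 )
#     print(batch_images.shape )   # (100, 784) since reshape=True by default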
| 159
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , patch_size=2 , num_channels=3 , embed_dim=1_6 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=1_0 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        """simple docstring"""
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
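        # Worked example using this tester's defaults (an assumption of the setup
        # above): image_size=32 and patch_size=2 give (32 // 2) ** 2 = 256 patches;
        # the three stages (len(depths) == 3) downsample twice, so expected_seq_len
        # is 256 // 4 ** 2 = 16 and expected_dim is embed_dim * 2 ** 2 = 16 * 4 = 64.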
    def create_and_check_backbone(self , config , pixel_values , labels ):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
        # verify ValueError is raised for an unsupported out_features entry
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def __A ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
    def test_config(self ):
        """simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        """simple docstring"""
        return
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("Swin does not use inputs_embeds" )
def __A ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def __A ( self : Dict ) -> int:
"""simple docstring"""
pass
    def test_model_common_attributes(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def __A ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def __A ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
    def check_hidden_states_output(self , inputs_dict , config , model_class , image_size ):
        """simple docstring"""
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def __A ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
    def test_model_outputs_equivalence(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t

        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has"
                                f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}."
                            ) , )

                recursive_check(tuple_output , dict_output )

        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase , BackboneTesterMixin ):
    """simple docstring"""

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self ):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone_outputs(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 159
| 1
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray ) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not np.nan
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not np.nan
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int ) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix )  # eigenvalues unused
        # Take all the columns in the reverse order (-1), and then take only the
        # first `dimensions` columns
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset onto the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int ) -> np.ndarray:
    """Project the dataset onto `dimensions` discriminant directions."""
    # The desired dimension must be smaller than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
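# A minimal usage sketch (the toy data below is illustrative only):
#
#     features = np.array([[1.0, 2.0, 3.0, 8.0, 9.0], [1.0, 3.0, 2.0, 9.0, 8.0]])
#     labels = np.array([0, 0, 0, 1, 1])
#     reduced = linear_discriminant_analysis(features, labels, classes=2, dimensions=1)
#     # `reduced` has shape (1, 5): one discriminant coordinate per sample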
def test_linear_discriminant_analysis() -> None:
    """simple docstring"""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """simple docstring"""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185
|
def stooge_sort(arr: list ) -> list:
    stooge(arr , 0 , len(arr ) - 1 )
    return arr


def stooge(arr: list , i: int , h: int ) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , (h - t) )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , h )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , (h - t) )
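# A quick sanity check (illustrative only):
#
#     >>> stooge_sort([2, 4, 5, 3, 1])
#     [1, 2, 3, 4, 5]
#
# The three overlapping 2/3-length recursions give stooge sort a running time of
# O(n^(log 3 / log 1.5)) ≈ O(n^2.7), so it is a teaching example, not a practical sort.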
if __name__ == "__main__":
_lowerCamelCase : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
_lowerCamelCase : List[Any] = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 352
| 0
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray ) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + exp(-x)) elementwise."""
    return 1 / (1 + np.exp(-vector ))
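# Illustrative values (sigmoid(0) is exactly 0.5):
#
#     >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
#     array([0.26894142, 0.5       , 0.73105858])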
if __name__ == "__main__":
import doctest
doctest.testmod()
| 466
|
'''simple docstring'''
def catalan_number(number: int ) -> int:
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
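# The loop implements the recurrence C(n) = C(n - 1) * (4n - 2) / (n + 1), so the
# function yields the Catalan numbers (illustrative check):
#
#     >>> [catalan_number(n) for n in range(1, 6)]
#     [1, 1, 2, 5, 14]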
if __name__ == "__main__":
import doctest
doctest.testmod()
| 466
| 1
|
'''simple docstring'''
def pancake_sort(arr: list ) -> list:
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi, flipping the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first cur elements, flipping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
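# A quick sanity check (illustrative only): two flips place each maximum, so the
# whole sort needs at most 2n - 3 flips.
#
#     >>> pancake_sort([3, 1, 2])
#     [1, 2, 3]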
if __name__ == "__main__":
_lowercase = input("""Enter numbers separated by a comma:\n""").strip()
_lowercase = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
| 5
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def load_vocab(vocab_file ):
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object ):
    def __init__(self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ):
        '''simple docstring'''
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self , token ):
        '''simple docstring'''
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = """""".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
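# A minimal trace of the greedy longest-prefix matching above (the toy vocabulary
# and positional call are illustrative assumptions, not part of the original file):
#
#     toy = WordpieceTokenizer({"新", "华", "新华"} , "<unk>" )
#     toy.tokenize("新华")   # -> ["新华"]: the longest prefix found in the vocab wins
#     toy.tokenize("社")     # -> ["<unk>"]: unseen characters fall back to unk_token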
class CpmAntTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False
def __init__(self , lowerCamelCase , lowerCamelCase="<d>" , lowerCamelCase="</d>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase="<unk>" , lowerCamelCase="</n>" , lowerCamelCase="</_>" , lowerCamelCase="left" , **lowerCamelCase , ):
'''simple docstring'''
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=lowerCamelCase , eod_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , unk_token=lowerCamelCase , line_token=lowerCamelCase , space_token=lowerCamelCase , padding_side=lowerCamelCase , **lowerCamelCase , )
_lowerCAmelCase = bod_token
_lowerCAmelCase = eod_token
_lowerCAmelCase = load_vocab(lowerCamelCase )
_lowerCAmelCase = self.encoder[space_token]
_lowerCAmelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id(self ):
        '''simple docstring'''
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self ):
        '''simple docstring'''
        return self.encoder[self.eod_token]

    @property
    def newline_id(self ):
        '''simple docstring'''
        return self.encoder["\n"]

    @property
    def vocab_size(self ):
        '''simple docstring'''
        return len(self.encoder )

    def get_vocab(self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize(self , text ):
        '''simple docstring'''
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens

    def _decode(self , token_ids , **kwargs ):
        '''simple docstring'''
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )

    def check(self , token ):
        '''simple docstring'''
        return token in self.encoder

    def convert_tokens_to_string(self , tokens ):
        '''simple docstring'''
        return "".join(tokens )

    def _convert_token_to_id(self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self , index ):
        '''simple docstring'''
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        index = 0
if " " in self.encoder:
_lowerCAmelCase = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
_lowerCAmelCase = self.encoder["""\n"""]
del self.encoder["\n"]
_lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
                    index = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
| 156
| 0
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    '''simple docstring'''

    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'}, )
    max_query_length: int = field(
        default=64, metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0, metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        }, )
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum ):
    '''simple docstring'''

    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset ):
    '''simple docstring'''

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        self.mode = mode
# Load data features from cache or dataset file
        version_tag = '''v2''' if args.version_2_with_negative else '''v1'''
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : Union[str, Any] = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['''features''']
                self.dataset = self.old_features.get('''dataset''' , None )
                self.examples = self.old_features.get('''examples''' , None )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
''' future run''' )
else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__(self ):
'''simple docstring'''
return len(self.features )
    def __getitem__(self , i ):
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'''is_impossible''': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
        return inputs
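# A minimal usage sketch (hedged: the tokenizer class and checkpoint name are
# assumptions for illustration, not part of this module):
#
#     args = SquadDataTrainingArguments(data_dir="./squad" )
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
#     train_dataset = SquadDataset(args , tokenizer , mode=Split.train )
#     batch = train_dataset[0]   # dict with input_ids / attention_mask / token_type_ids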
| 712
|
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int ) -> set[int]:
    """Return the prime products encoding each prime partition of the input."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )
    return ret


def solution(number_unique_partitions: int = 5_000 ) -> int | None:
    """Return the first value writable as a sum of primes in more than
    `number_unique_partitions` unique ways."""
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
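# Worked example (Project Euler 77 asks for more than 5000 ways; a small
# threshold is quick to check by hand):
#
#     >>> solution(4)
#     10
#
# 10 is the first number with more than four prime partitions:
# 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7 and 5+5 give five distinct products in the
# prime-product encoding used by `partition` above.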
if __name__ == "__main__":
print(F'{solution() = }')
| 229
| 0
|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))
    def get_scheduler_config(self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.00_01,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
    def full_loop(self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
    def test_timesteps(self ):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset(self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
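        # With 1000 train timesteps and 5 inference steps the stride is
        # 1000 // 5 = 200, so the raw timesteps are [800, 600, 400, 200, 0];
        # steps_offset=1 shifts them to [801, 601, 401, 201, 1] as asserted above.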
    def test_betas(self ):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules(self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample(self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing(self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr(self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding(self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_time_indices(self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps(self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps )
    def test_eta(self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t , eta=eta )
    def test_variance(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
    def test_batch_step_no_noise(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps , eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1E-2
        assert abs(result_mean.item() - 0.49_82 ) < 1E-3
    def test_full_loop_no_noise(self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
    def test_full_loop_with_v_prediction(self ):
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1E-2
        assert abs(result_mean.item() - 0.06_84 ) < 1E-3
    def test_full_loop_with_set_alpha_to_one(self ):
        # We specify a different beta start, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1E-2
        assert abs(result_mean.item() - 0.19_51 ) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one(self ):
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0784 ) < 1E-2
        assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 274
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast ):
    """simple docstring"""

    slow_tokenizer_class = CustomTokenizer
pass
| 316
| 0
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config(self ):
        """simple docstring"""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self , config , pixel_values ):
        """simple docstring"""
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
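        # Note: the 32x spatial reduction comes from RegNet's stem (stride 2)
        # followed by four stages of stride 2 each, i.e. 2 * 2**4 = 32, hence
        # image_size // 32 on each side of the final feature map.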
    def create_and_check_for_image_classification(self , config , pixel_values ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin , unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config(self ):
        """simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        """simple docstring"""
        return
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
    def test_forward_signature(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output(self ):
        """simple docstring"""

        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation(self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self ):
        """simple docstring"""
        model = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 10_00)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 703
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key , file ):
    """Convert Megatron-DeepSpeed weight names into the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return F"h.{layer_number}." + key
def get_dtype_size(dtype ):
    """Return the size in bytes of one element of `dtype` (1/8 for torch.bool)."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(F"`dtype` is not a valid dtype: {dtype}." )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
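# Illustrative values: get_dtype_size(torch.float16) == 2 and
# get_dtype_size(torch.int32) == 4; torch.bool is counted as 1/8 byte here,
# mirroring the byte accounting used when building the sharded-checkpoint index.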
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """simple docstring"""
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))

        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print('Processing file: {}'.format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    'pytorch_model_{}-of-{}.bin'.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5))

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + '.index.json'), 'w', encoding='utf-8') as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + '\n'
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('layer') and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace('model_00', f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location='cpu')

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
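# Example invocation (script name and paths shown for illustration only):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_deepspeed_ckpt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --pretraining_tp 4 \
#       --shard_model
#
# With --shard_model the script writes pytorch_model_00001-of-000NN.bin shards
# plus a WEIGHTS_NAME + ".index.json" weight map; without it, a single merged
# state dict is loaded into BloomModel and saved as one file.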
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}

RESOURCE_FILES_NAMES = {
    'sentencepiece_model_file': 'sentencepiece.bpe.model',
    'vocab_file': 'vocab.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
        'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
    },
    'sentencepiece_model_file': {
        'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
        'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ernie-m-base': 514,
    'ernie-m-large': 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    'ernie-m-base': {'do_lower_case': False},
    'ernie-m-large': {'do_lower_case': False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        """simple docstring"""
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize('NFKC', ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """simple docstring"""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """simple docstring"""
        if self.sp_model_kwargs.get('enable_sampling') is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('alpha') is not None:
            alpha = self.sp_model_kwargs.get('alpha')
        if self.sp_model_kwargs.get('nbest_size') is not None:
            nbest_size = self.sp_model_kwargs.get('nbest_size')

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def convert_ids_to_string(self, ids):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath, 'r', encoding='utf-8') as f:
            for index, line in enumerate(f):
                token = line.rstrip('\n')
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        tokenizer_model_file = os.path.join(save_directory, 'sentencepiece.bpe.model')
        with open(tokenizer_model_file, 'wb') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test, repeated `prec` times."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input('Enter bound : ').strip()))
    print('Here\'s the list of primes:')
    print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
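# Quick sanity sketch (assumes bin_exp_mod(b, e, m) returns b**e % m):
#
#   assert is_prime_big(97)
#   assert not is_prime_big(100)
#
# A single Miller-Rabin round wrongly accepts a composite with probability at
# most 1/4, so `prec` independent rounds leave at most a 4**(-prec) error rate.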
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
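# Usage sketch (illustrative values):
#
#   dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
#   value_array = np.array([[0.0, 0.0, 0.1]])
#   similarity_search(dataset, value_array)
#   # -> [[[0.0, 0.0, 0.0], 0.1]]  (nearest vector and its euclidean distance,
#   #    up to floating-point rounding)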
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info of the current git repository into `folder_path`/git_log.json."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, """git_log.json"""), """w""") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node initialization."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("""Initializing GPUs""")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["""WORLD_SIZE"""])
        params.n_gpu_per_node = int(os.environ["""N_GPU_NODE"""])
        params.global_rank = int(os.environ["""RANK"""])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["""N_NODES"""])
        assert params.node_id == int(os.environ["""NODE_RANK"""])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = F"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes)
    logger.info(PREFIX + """Node ID : %i""" % params.node_id)
    logger.info(PREFIX + """Local rank : %i""" % params.local_rank)
    logger.info(PREFIX + """World size : %i""" % params.world_size)
    logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node)
    logger.info(PREFIX + """Master : %s""" % str(params.is_master))
    logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node))
    logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu))
    logger.info(PREFIX + """Hostname : %s""" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("""Initializing PyTorch distributed""")
        torch.distributed.init_process_group(
            init_method="""env://""", backend="""nccl""", )


def set_seed(args):
    """Seed numpy/torch RNGs for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
'''simple docstring'''
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__) + '''/grid.txt''') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
'''simple docstring'''
def decimal_to_fraction(number: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(number)
    except ValueError:
        raise ValueError('''Please enter a valid number''')
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split('''.''')[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce by the greatest common divisor, found with Euclid's algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
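# Worked example: decimal_to_fraction(0.75) first forms 75/100, then Euclid's
# algorithm finds gcd(100, 75) = 25, so the reduced result is (3, 4).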
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """simple docstring"""
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""")
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("""the function is defined for non-negative integers""")
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
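# Worked example matching the demo below: binomial_distribution(2, 4, 0.75)
# = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.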
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """simple docstring"""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    """simple docstring"""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("""Sorted order is:""", """ """.join(str(x) for x in a))
if __name__ == "__main__":
main()
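# Worked example: for [8, 3, 2, 7, 4, 6, 8], min_val=2 and max_val=8, so seven
# holes are built; holes = [1, 1, 1, 0, 1, 1, 2] (counts for values 2..8), and
# reading the holes back out in order yields 2 3 4 6 7 8 8.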
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """simple docstring"""
    # First validate the tree: every node must be a TreeNode holding float-able data.
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))
if __name__ == "__main__":
import doctest
doctest.testmod()
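# Usage sketch:
#
#   root = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))
#   assert is_binary_search_tree(root)
#
#   bad = TreeNode(2.0, left=TreeNode(5.0))   # left child violates the upper bound
#   assert not is_binary_search_tree(bad)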
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_mobilebert': [
        'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileBertConfig',
        'MobileBertOnnxConfig',
    ],
    'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mobilebert_fast'] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilebert'] = [
        'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileBertForMaskedLM',
        'MobileBertForMultipleChoice',
        'MobileBertForNextSentencePrediction',
        'MobileBertForPreTraining',
        'MobileBertForQuestionAnswering',
        'MobileBertForSequenceClassification',
        'MobileBertForTokenClassification',
        'MobileBertLayer',
        'MobileBertModel',
        'MobileBertPreTrainedModel',
        'load_tf_weights_in_mobilebert',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilebert'] = [
        'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileBertForMaskedLM',
        'TFMobileBertForMultipleChoice',
        'TFMobileBertForNextSentencePrediction',
        'TFMobileBertForPreTraining',
        'TFMobileBertForQuestionAnswering',
        'TFMobileBertForSequenceClassification',
        'TFMobileBertForTokenClassification',
        'TFMobileBertMainLayer',
        'TFMobileBertModel',
        'TFMobileBertPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
_UpperCAmelCase = state_dict['cls.predictions.decoder.weight']
_UpperCAmelCase = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f'''cls.predictions.transform.dense.{w}''']
_UpperCAmelCase = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
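# Example invocation (script name assumed; checkpoint path is the default):
#
#   python extract_distilbert.py \
#       --model_type bert \
#       --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
#       --vocab_transform
#
# The resulting state dict keeps 6 of the 12 teacher layers (indices
# 0, 2, 4, 7, 9, 11) and can be loaded into a student model for distillation.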
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
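# Round-trip example: encode("hello") concatenates the five-letter groups for
# h, e, l, l, o and returns "AABBBAABAAABABAABABAABBAB"; decode() of that
# string yields "hello" again.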
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__SCREAMING_SNAKE_CASE ="sshleifer/bart-tiny-random"
__SCREAMING_SNAKE_CASE ="patrickvonplaten/t5-tiny-random"
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return AutoConfig.from_pretrained(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , *lowercase_ : int = create_student_by_copying_alternating_layers(__UpperCamelCase ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.num_hidden_layers ,1 )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ , *lowercase_ : Any = create_student_by_copying_alternating_layers(__UpperCamelCase ,tempfile.mkdtemp() ,e=1 ,d=__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ , *lowercase_ : Any = create_student_by_copying_alternating_layers(__UpperCamelCase ,tempfile.mkdtemp() ,e=1 ,d=__UpperCamelCase )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,self.teacher_config.encoder_layers )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , *lowercase_ : Tuple = create_student_by_copying_alternating_layers(__UpperCamelCase ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,1 )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
create_student_by_copying_alternating_layers(__UpperCamelCase ,tempfile.mkdtemp() ,e=__UpperCamelCase ,d=__UpperCamelCase )
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main():
    print('Making key files...')
    make_key_files('rsa', 1024)
    print('Key files generation successful.')


def generate_key(key_size: int):
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int):
    if os.path.exists(F'''{name}_pubkey.txt''') or os.path.exists(F'''{name}_privkey.txt'''):
        print('\nWARNING:')
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.')
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F'''\nWriting public key to file {name}_pubkey.txt...''')
    with open(F'''{name}_pubkey.txt''', 'w') as out_file:
        out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''')

    print(F'''Writing private key to file {name}_privkey.txt...''')
    with open(F'''{name}_privkey.txt''', 'w') as out_file:
        out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''')
if __name__ == "__main__":
main()
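# The generated pair supports the usual RSA round trip (illustrative sketch;
# key generation at 1024 bits is slow):
#
#   public_key, private_key = generate_key(1024)
#   n, e = public_key
#   _, d = private_key
#   message = 42
#   ciphertext = pow(message, e, n)
#   assert pow(ciphertext, d, n) == message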
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    '''simple docstring'''
    # Equation parameter: flattening of the reference ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    '''simple docstring'''
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
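# Example (LeetCode-style full justification, width 16):
#
#   text_justification("This is an example of text justification.", 16)
#   # -> ['This    is    an', 'example  of text', 'justification.  ']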
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}

        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
def snake_case__ ( self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ : Union[str, Any] = True
if model_class.__name__ in [
*get_values(lowercase_),
*get_values(lowercase_),
]:
continue
snake_case_ : List[Any] = model_class(lowercase_)
model.to(lowercase_)
model.train()
snake_case_ : int = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
snake_case_ : Union[str, Any] = model(**lowercase_).loss
loss.backward()
def snake_case__ ( self):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ : Dict = False
snake_case_ : Any = True
if (
model_class.__name__
in [*get_values(lowercase_), *get_values(lowercase_)]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ : Any = model_class(lowercase_)
model.to(lowercase_)
model.gradient_checkpointing_enable()
model.train()
snake_case_ : Any = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
snake_case_ : Optional[int] = model(**lowercase_).loss
loss.backward()
def snake_case__ ( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def snake_case__ ( self):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def snake_case__ ( self):
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_):
snake_case_ : int = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
snake_case_ : int = model(**self._prepare_for_class(lowercase_ , lowercase_))
snake_case_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ : Dict = self.model_tester.num_stages
self.assertEqual(len(lowercase_) , expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def snake_case__ ( self):
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@slow
def snake_case__ ( self):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 92
| 0
|
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = embed_dim
lowerCamelCase_ = depths
lowerCamelCase_ = num_heads
lowerCamelCase_ = window_size
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = qkv_bias
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = hidden_act
lowerCamelCase_ = use_absolute_embeddings
lowerCamelCase_ = patch_norm
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = is_training
lowerCamelCase_ = scope
lowerCamelCase_ = use_labels
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = SwinvaModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
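        # each Swin stage merges 2x2 neighboring patches, so the sequence length shrinks by a
        # factor of 4 per stage after the first, while the hidden dimension doubles per stage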
lowerCamelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCamelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = SwinvaForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = SwinvaForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = SwinvaForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
lowerCamelCase_ = len(self.model_tester.depths )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
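            # Swinv2 computes attention within local windows, so each attention map covers
            # window_size**2 tokens rather than the full sequence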
lowerCamelCase_ = config.window_size**2
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
lowerCamelCase_ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCamelCase_ = 2
self.assertEqual(out_len + added_hidden_states , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# Swinv2 has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=SCREAMING_SNAKE_CASE_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 42
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # hard-concrete stretch: map sigmoid scores from (0, 1) onto (l, r), then clamp to [0, 1]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
| 415
| 0
|
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _UpperCAmelCase ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
'''simple docstring'''
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate(self, column):
import torch
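        # only stack when every element is a tensor with identical shape and dtype;
        # otherwise keep the Python list so ragged columns still round-trip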
if isinstance(a_ , a_ ) and column:
if all(
isinstance(a_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(a_ )
return column
    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # user-supplied torch_tensor_kwargs (e.g. dtype, device) override the inferred defaults
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 710
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
    model_type = "sew-d"
def __init__(self , a_=32 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_=2 , a_=5_12 , a_=2_56 , a_=True , a_=True , a_=("p2c", "c2p") , a_="layer_norm" , a_="gelu_python" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.1 , a_=0.02 , a_=1E-7 , a_=1E-5 , a_="group" , a_="gelu" , a_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , a_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a_=False , a_=1_28 , a_=16 , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_="mean" , a_=False , a_=False , a_=2_56 , a_=0 , a_=1 , a_=2 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
__snake_case : Any = hidden_size
__snake_case : Tuple = feat_extract_norm
__snake_case : int = feat_extract_activation
__snake_case : List[str] = list(a_ )
__snake_case : Optional[Any] = list(a_ )
__snake_case : List[str] = list(a_ )
__snake_case : List[str] = conv_bias
__snake_case : Dict = num_conv_pos_embeddings
__snake_case : str = num_conv_pos_embedding_groups
__snake_case : int = len(self.conv_dim )
__snake_case : List[Any] = num_hidden_layers
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = squeeze_factor
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[Any] = position_buckets
__snake_case : Union[str, Any] = share_att_key
__snake_case : Tuple = relative_attention
__snake_case : str = norm_rel_ebd
__snake_case : Tuple = list(a_ )
__snake_case : Optional[int] = hidden_act
__snake_case : int = num_attention_heads
__snake_case : Optional[Any] = hidden_dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : Any = activation_dropout
__snake_case : Tuple = feat_proj_dropout
__snake_case : str = final_dropout
__snake_case : str = layer_norm_eps
__snake_case : Tuple = feature_layer_norm_eps
__snake_case : Tuple = initializer_range
__snake_case : int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : Union[str, Any] = apply_spec_augment
__snake_case : str = mask_time_prob
__snake_case : Optional[Any] = mask_time_length
__snake_case : List[Any] = mask_time_min_masks
__snake_case : str = mask_feature_prob
__snake_case : List[str] = mask_feature_length
__snake_case : Optional[int] = mask_feature_min_masks
# ctc loss
__snake_case : Union[str, Any] = ctc_loss_reduction
__snake_case : Optional[Any] = ctc_zero_infinity
# sequence classification
__snake_case : str = use_weighted_layer_sum
__snake_case : Any = classifier_proj_size
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
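        # the overall stride of the convolutional feature encoder is the product of all layer strides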
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 229
| 0
|
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers: qubits 0-2 hold the inputs, qubit 3 is the carry-out ancilla
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 3
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class __A ( unittest.TestCase ):
"""simple docstring"""
def snake_case_( self )-> Dict:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_lowerCamelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def snake_case_( self )-> Optional[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
def snake_case_( self )-> Union[str, Any]:
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 1_5:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 161
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 712
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
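# map public submodules to their exported names; _LazyModule imports each one only on first attribute access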
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 314
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase__ ( __A ):
"""simple docstring"""
lowerCAmelCase__ : torch.FloatTensor
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",) , _SCREAMING_SNAKE_CASE=(6_4,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3_2 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE=True , ) -> Optional[Any]:
super().__init__()
a_ : Optional[int] = layers_per_block
a_ : int = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
a_ : Union[str, Any] = None
a_ : Optional[int] = nn.ModuleList([] )
# down
a_ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
a_ : Any = output_channel
a_ : Optional[Any] = block_out_channels[i]
a_ : Union[str, Any] = i == len(UpperCamelCase__ ) - 1
a_ : Any = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
a_ : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
a_ : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
a_ : Tuple = nn.SiLU()
a_ : List[Any] = 2 * out_channels if double_z else out_channels
a_ : Optional[int] = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
a_ : Union[str, Any] = False
def A ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
a_ : str = x
a_ : Dict = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_SCREAMING_SNAKE_CASE ):
def custom_forward(*_SCREAMING_SNAKE_CASE ):
return module(*UpperCamelCase__ )
return custom_forward
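            # gradient checkpointing recomputes activations during backward to save memory;
            # the use_reentrant flag only exists from torch 1.11 onward, hence the version switch below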
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
a_ : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
a_ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
a_ : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
a_ : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
a_ : Tuple = down_block(UpperCamelCase__ )
# middle
a_ : Union[str, Any] = self.mid_block(UpperCamelCase__ )
# post-process
a_ : str = self.conv_norm_out(UpperCamelCase__ )
a_ : str = self.conv_act(UpperCamelCase__ )
a_ : List[Any] = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",) , _SCREAMING_SNAKE_CASE=(6_4,) , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3_2 , _SCREAMING_SNAKE_CASE="silu" , _SCREAMING_SNAKE_CASE="group" , ) -> Optional[Any]:
super().__init__()
a_ : Dict = layers_per_block
a_ : Dict = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
a_ : Tuple = None
a_ : List[str] = nn.ModuleList([] )
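        # with spatial norm, the mid and up blocks are conditioned on the latent embedding through temb_channels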
a_ : Optional[Any] = in_channels if norm_type == "spatial" else None
# mid
a_ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
a_ : Optional[int] = list(reversed(UpperCamelCase__ ) )
a_ : Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
a_ : int = output_channel
a_ : Optional[int] = reversed_block_out_channels[i]
a_ : int = i == len(UpperCamelCase__ ) - 1
a_ : Any = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
a_ : List[Any] = output_channel
# out
if norm_type == "spatial":
a_ : List[Any] = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
a_ : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
a_ : Union[str, Any] = nn.SiLU()
a_ : int = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
a_ : Optional[Any] = False
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
a_ : Any = z
a_ : Tuple = self.conv_in(UpperCamelCase__ )
a_ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_SCREAMING_SNAKE_CASE ):
def custom_forward(*_SCREAMING_SNAKE_CASE ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
a_ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
a_ : Tuple = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
a_ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
a_ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
a_ : int = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
a_ : Optional[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
a_ : Union[str, Any] = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
a_ : List[Any] = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
a_ : Optional[int] = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
a_ : List[str] = self.conv_norm_out(UpperCamelCase__ )
else:
a_ : List[Any] = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
a_ : Optional[Any] = self.conv_act(UpperCamelCase__ )
a_ : Dict = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="random" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ) -> Optional[int]:
super().__init__()
a_ : Union[str, Any] = n_e
a_ : List[str] = vq_embed_dim
a_ : Any = beta
a_ : Optional[int] = legacy
a_ : Optional[int] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
a_ : Tuple = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
a_ : List[str] = self.used.shape[0]
a_ : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
a_ : List[Any] = self.re_embed
a_ : Tuple = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
a_ : Dict = n_e
a_ : List[str] = sane_index_shape
def A ( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
a_ : List[Any] = inds.shape
assert len(UpperCamelCase__ ) > 1
a_ : Optional[int] = inds.reshape(ishape[0] , -1 )
a_ : List[str] = self.used.to(UpperCamelCase__ )
a_ : Optional[Any] = (inds[:, :, None] == used[None, None, ...]).long()
a_ : Dict = match.argmax(-1 )
a_ : int = match.sum(2 ) < 1
if self.unknown_index == "random":
a_ : str = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
a_ : Optional[Any] = self.unknown_index
return new.reshape(UpperCamelCase__ )
def A ( self , _SCREAMING_SNAKE_CASE ) -> Any:
a_ : Dict = inds.shape
assert len(UpperCamelCase__ ) > 1
a_ : Union[str, Any] = inds.reshape(ishape[0] , -1 )
a_ : Optional[int] = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
a_ : List[str] = 0 # simply set to zero
a_ : str = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def A ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
a_ : Optional[int] = z.permute(0 , 2 , 3 , 1 ).contiguous()
a_ : str = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
a_ : List[Any] = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
a_ : Tuple = self.embedding(UpperCamelCase__ ).view(z.shape )
a_ : Union[str, Any] = None
a_ : int = None
# compute loss for embedding
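        # the codebook term drags embeddings toward the encoder outputs, while the beta-weighted
        # commitment term keeps encoder outputs close to their assigned codebook entries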
if not self.legacy:
a_ : str = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
a_ : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
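        # straight-through estimator: the forward pass uses the quantized values while the
        # backward pass copies gradients from z_q directly onto z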
a_ : List[str] = z + (z_q - z).detach()
# reshape back to match original input shape
a_ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
a_ : Optional[int] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
a_ : List[Any] = self.remap_to_used(UpperCamelCase__ )
a_ : int = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
a_ : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
if self.remap is not None:
a_ : int = indices.reshape(shape[0] , -1 ) # add batch axis
a_ : int = self.unmap_to_all(UpperCamelCase__ )
a_ : Dict = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
a_ : Any = self.embedding(UpperCamelCase__ )
if shape is not None:
a_ : int = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
a_ : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCAmelCase__ ( __A ):
"""simple docstring"""
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # clamp the log-variance for numerical stability before exponentiating
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure the sample lives on the same device and dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
| 473
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
"""simple docstring"""
_a : str = CycleDiffusionPipeline
_a : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
_a : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
_a : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
_a : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_a : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
a_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1_000 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
a_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ = CLIPTextModel(UpperCamelCase__ )
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        # mps does not support device-bound generators, so fall back to the global seed
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_cycle_diffusion_pipeline(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_cycle_diffusion_pipeline_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        """simple docstring"""
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        """simple docstring"""
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
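# Minimal usage sketch of the pipeline exercised above. It mirrors the slow
# tests: the checkpoint id, image URL and prompts come from them; the reduced
# step count is an illustrative choice, not a tested configuration.
if __name__ == "__main__":
    import torch
    from diffusers import CycleDiffusionPipeline, DDIMScheduler
    from diffusers.utils import load_image

    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/cycle-diffusion/black_colored_car.png"
    ).resize((512, 512))
    scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    pipe = CycleDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
    )
    result = pipe(
        prompt="A blue colored car",
        source_prompt="A black colored car",
        image=init_image,
        num_inference_steps=50,
        eta=0.1,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
        generator=torch.manual_seed(0),
        output_type="np",
    )
    print(result.images[0].shape)  # (512, 512, 3)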
| 536
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """simple docstring"""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int, int]

    def __post_init__(self):
        '''simple docstring'''
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
    def resolution(self):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        '''simple docstring'''
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        '''simple docstring'''
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords
    @property
    def camera_rays(self):
        '''simple docstring'''
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        '''simple docstring'''
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size, -1, 2)
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)
        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        '''simple docstring'''
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    '''simple docstring'''
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
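# Quick demonstration of the ray layout the helper above produces: 20 pan
# views are stacked along the flattened middle dimension, and each pixel
# carries an (origin, direction) pair of 3-vectors.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays
    print(rays.shape)  # torch.Size([1, 81920, 2, 3]) -> 20 views * 64 * 64 pixels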
| 280
|
'''simple docstring'''
class CircularQueue:
    """simple docstring"""

    def __init__(self, n):
        '''simple docstring'''
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        '''simple docstring'''
        return self.size

    def is_empty(self):
        '''simple docstring'''
        return self.size == 0

    def first(self):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        '''simple docstring'''
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
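# Small self-check of the circular queue above: slots are reused modulo n,
# so a fourth element fits after one dequeue even though capacity is 3.
if __name__ == "__main__":
    q = CircularQueue(3)
    q.enqueue(10).enqueue(20).enqueue(30)  # enqueue returns self, so calls chain
    assert len(q) == 3 and q.first() == 10
    assert q.dequeue() == 10
    q.enqueue(40)  # wraps around into the slot freed above
    assert [q.dequeue(), q.dequeue(), q.dequeue()] == [20, 30, 40]
    assert q.is_empty()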
| 280
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None, ):
super().__init__()
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = attention_head_dim
lowerCamelCase_ = num_attention_heads * attention_head_dim
lowerCamelCase_ = additional_embeddings
lowerCamelCase_ = time_embed_dim or inner_dim
lowerCamelCase_ = embedding_proj_dim or embedding_dim
lowerCamelCase_ = clip_embed_dim or embedding_dim
lowerCamelCase_ = Timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 0 )
lowerCamelCase_ = TimestepEmbedding(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , out_dim=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if embedding_proj_norm_type is None:
lowerCamelCase_ = None
elif embedding_proj_norm_type == "layer":
lowerCamelCase_ = nn.LayerNorm(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
lowerCamelCase_ = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if encoder_hid_proj_type is None:
lowerCamelCase_ = None
elif encoder_hid_proj_type == "linear":
lowerCamelCase_ = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __SCREAMING_SNAKE_CASE ) )
if added_emb_type == "prd":
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , 1 , __SCREAMING_SNAKE_CASE ) )
elif added_emb_type is None:
lowerCamelCase_ = None
else:
raise ValueError(
F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
lowerCamelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , activation_fn='gelu' , attention_bias=__SCREAMING_SNAKE_CASE , )
for d in range(__SCREAMING_SNAKE_CASE )
] )
if norm_in_type == "layer":
lowerCamelCase_ = nn.LayerNorm(__SCREAMING_SNAKE_CASE )
elif norm_in_type is None:
lowerCamelCase_ = None
else:
raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''' )
lowerCamelCase_ = nn.LayerNorm(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
lowerCamelCase_ = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , __SCREAMING_SNAKE_CASE , persistent=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , __SCREAMING_SNAKE_CASE ) )
lowerCamelCase_ = nn.Parameter(torch.zeros(1 , __SCREAMING_SNAKE_CASE ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
lowerCamelCase_ = {}
def fn_recursive_add_processors(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : torch.nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, AttentionProcessor] ):
if hasattr(__SCREAMING_SNAKE_CASE , 'set_processor' ):
lowerCamelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
lowerCamelCase_ = len(self.attn_processors.keys() )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(__SCREAMING_SNAKE_CASE )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : torch.nn.Module , __SCREAMING_SNAKE_CASE : str ):
if hasattr(__SCREAMING_SNAKE_CASE , 'set_processor' ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
module.set_processor(__SCREAMING_SNAKE_CASE )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for name, module in self.named_children():
fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True, ):
lowerCamelCase_ = hidden_states.shape[0]
lowerCamelCase_ = timestep
if not torch.is_tensor(__SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0:
lowerCamelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase_ = timesteps * torch.ones(__SCREAMING_SNAKE_CASE , dtype=timesteps.dtype , device=timesteps.device )
lowerCamelCase_ = self.time_proj(__SCREAMING_SNAKE_CASE )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowerCamelCase_ = timesteps_projected.to(dtype=self.dtype )
lowerCamelCase_ = self.time_embedding(__SCREAMING_SNAKE_CASE )
if self.embedding_proj_norm is not None:
lowerCamelCase_ = self.embedding_proj_norm(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.embedding_proj(__SCREAMING_SNAKE_CASE )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowerCamelCase_ = self.encoder_hidden_states_proj(__SCREAMING_SNAKE_CASE )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowerCamelCase_ = self.proj_in(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.positional_embedding.to(hidden_states.dtype )
lowerCamelCase_ = []
lowerCamelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(__SCREAMING_SNAKE_CASE )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowerCamelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowerCamelCase_ = hidden_states[:, None, :]
lowerCamelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowerCamelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(__SCREAMING_SNAKE_CASE , -1 , -1 )
additional_embeds.append(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = torch.cat(
__SCREAMING_SNAKE_CASE , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowerCamelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowerCamelCase_ = F.pad(
__SCREAMING_SNAKE_CASE , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowerCamelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
lowerCamelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
lowerCamelCase_ = F.pad(__SCREAMING_SNAKE_CASE , (0, self.additional_embeddings) , value=0.0 )
lowerCamelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowerCamelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowerCamelCase_ = self.norm_in(__SCREAMING_SNAKE_CASE )
for block in self.transformer_blocks:
lowerCamelCase_ = block(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.norm_out(__SCREAMING_SNAKE_CASE )
if self.prd_embedding is not None:
lowerCamelCase_ = hidden_states[:, -1]
else:
lowerCamelCase_ = hidden_states[:, additional_embeddings_len:]
lowerCamelCase_ = self.proj_to_clip_embeddings(__SCREAMING_SNAKE_CASE )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__SCREAMING_SNAKE_CASE )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
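# Shape-level sketch of how the prior is driven, written against the upstream
# diffusers.models.PriorTransformer API that this copy mirrors; the tiny
# config values below are illustrative, not a published configuration.
if __name__ == "__main__":
    from diffusers.models import PriorTransformer as UpstreamPriorTransformer

    prior = UpstreamPriorTransformer(
        num_attention_heads=2, attention_head_dim=4, num_layers=2,
        embedding_dim=8, num_embeddings=7, additional_embeddings=4,
    )
    hidden_states = torch.randn(2, 8)             # noisy image embedding being denoised
    proj_embedding = torch.randn(2, 8)            # pooled text embedding
    encoder_hidden_states = torch.randn(2, 7, 8)  # per-token text states
    out = prior(
        hidden_states, timestep=10, proj_embedding=proj_embedding,
        encoder_hidden_states=encoder_hidden_states,
    )
    print(out.predicted_image_embedding.shape)    # torch.Size([2, 8])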
| 549
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 549
| 1
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
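# Shape walkthrough of the projection above with toy sizes: under classifier
# free guidance the image embeddings are doubled (unconditional half first),
# then turned into 4 extra context tokens that are prepended to the text
# encoder states. The sizes here are illustrative only.
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=16, cross_attention_dim=8
    )
    image_embeddings = torch.randn(2, 32)
    prompt_embeds = torch.randn(4, 32)                  # already duplicated for guidance
    text_encoder_hidden_states = torch.randn(4, 7, 32)
    tokens, time_emb = proj(
        image_embeddings=image_embeddings,
        prompt_embeds=prompt_embeds,
        text_encoder_hidden_states=text_encoder_hidden_states,
        do_classifier_free_guidance=True,
    )
    print(tokens.shape, time_emb.shape)  # torch.Size([4, 11, 8]) torch.Size([4, 16])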
| 719
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """simple docstring"""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs, ):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file, encoding='utf-8' ) as reader:
        return json.load(reader )
class __SCREAMING_SNAKE_CASE:
def __init__( self: Optional[int] ) -> Union[str, Any]:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase )
def lowerCAmelCase_ ( cls: int , UpperCamelCase: int , **UpperCamelCase: str ) -> Optional[Any]:
snake_case__ = kwargs.pop('config' , UpperCamelCase )
snake_case__ = kwargs.pop('trust_remote_code' , UpperCamelCase )
snake_case__ = True
snake_case__ , snake_case__ = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase , **UpperCamelCase )
snake_case__ = config_dict.get('image_processor_type' , UpperCamelCase )
snake_case__ = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
snake_case__ = config_dict.pop('feature_extractor_type' , UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
snake_case__ = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoFeatureExtractor']
snake_case__ = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = AutoConfig.from_pretrained(UpperCamelCase , **UpperCamelCase )
# It could be in `config.image_processor_type``
snake_case__ = getattr(UpperCamelCase , 'image_processor_type' , UpperCamelCase )
if hasattr(UpperCamelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
snake_case__ = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
snake_case__ = image_processor_class_from_name(UpperCamelCase )
snake_case__ = image_processor_auto_map is not None
snake_case__ = image_processor_class is not None or type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
snake_case__ = resolve_trust_remote_code(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if has_remote_code and trust_remote_code:
snake_case__ = get_class_from_dynamic_module(
UpperCamelCase , UpperCamelCase , **UpperCamelCase )
snake_case__ = kwargs.pop('code_revision' , UpperCamelCase )
if os.path.isdir(UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
snake_case__ = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase )]
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Optional[Any] , UpperCamelCase: int ) -> Optional[Any]:
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase , UpperCamelCase )
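# Typical entry point into the mapping above: the checkpoint's config names
# its model_type ("vit"), which IMAGE_PROCESSOR_MAPPING_NAMES resolves to
# ViTImageProcessor before instantiating it from the hosted config.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    inputs = processor(images=image, return_tensors="pt")
    print(type(processor).__name__, inputs["pixel_values"].shape)  # ViTImageProcessor torch.Size([1, 3, 224, 224])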
| 372
| 0
|
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f'''Number {x} is at index {res}''')
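# Worked example: jump search assumes a sorted array and costs O(sqrt(n))
# comparisons -- it jumps ahead in sqrt(n)-sized blocks, then scans linearly
# inside the block that may contain x.
#
#     arr = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
#     jump_search(arr, 55)   # blocks of 4; scans [21, 34, 55, 89], returns 10
#     jump_search(arr, 56)   # absent, returns -1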
| 96
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 545
| 0
|
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    '''simple docstring'''

    def __init__(self, img, dst_width, dst_height):
        '''simple docstring'''
        if dst_width < 0 or dst_height < 0:
            raise ValueError('Destination width/height should be > 0')

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        '''simple docstring'''
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x):
        '''simple docstring'''
        return int(self.ratio_x * x)

    def get_y(self, y):
        '''simple docstring'''
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('''image_data/lena.jpg''', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
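# The mapping above is plain coordinate scaling: destination pixel (i, j)
# samples source pixel (int(ratio_y * i), int(ratio_x * j)). A tiny
# self-contained check (no image file needed):
def _demo_nearest_neighbour():
    ramp = np.arange(48, dtype=np.uint8).reshape(4, 4, 3)
    small = NearestNeighbour(ramp, 2, 2)  # ratio_x = ratio_y = 2.0
    small.process()
    assert (small.output[0][0] == ramp[0][0]).all()  # keeps every second row/column
    assert (small.output[1][1] == ramp[2][2]).all()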
| 389
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
def __call__( self , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
UpperCAmelCase_ : str = self.tokenizer
UpperCAmelCase_ : Optional[int] = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
return text_encoding
# add pixel_values
UpperCAmelCase_ : Optional[int] = self.image_processor(snake_case_ , return_tensors=snake_case_ )
if text is not None:
UpperCAmelCase_ : Optional[Any] = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
else:
UpperCAmelCase_ : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(snake_case_ )
return encoding_image_processor
def _UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = self.tokenizer.model_input_names
UpperCAmelCase_ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
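# Round-trip sketch for the processor above; the checkpoint id is the
# standard BLIP captioning model and is an illustrative choice.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import BlipProcessor

    processor = BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-base')
    image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
    enc = processor(images=image, text='a photo of', return_tensors='pt')
    print(sorted(enc.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']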
| 389
| 1
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
__magic_name__ : List[Any] =XCLIPTextConfig()
# derive patch size from model name
__magic_name__ : int =model_name.find("""patch""" )
__magic_name__ : str =int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
__magic_name__ : Dict =XCLIPVisionConfig(patch_size=lowerCamelCase , num_frames=lowerCamelCase )
if "large" in model_name:
__magic_name__ : int =768
__magic_name__ : Tuple =3072
__magic_name__ : str =12
__magic_name__ : Optional[Any] =1024
__magic_name__ : List[str] =4096
__magic_name__ : Union[str, Any] =16
__magic_name__ : Union[str, Any] =24
__magic_name__ : Tuple =768
__magic_name__ : Union[str, Any] =3072
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Dict =336
__magic_name__ : Any =XCLIPConfig.from_text_vision_configs(lowerCamelCase , lowerCamelCase )
if "large" in model_name:
__magic_name__ : int =768
return config
def rename_key(name):
# text encoder
if name == "token_embedding.weight":
__magic_name__ : int =name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
__magic_name__ : Union[str, Any] =name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
__magic_name__ : Union[str, Any] =name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__magic_name__ : int =name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__magic_name__ : Optional[Any] =name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__magic_name__ : Any =name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
__magic_name__ : List[str] =name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ : Optional[Any] =name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
__magic_name__ : str =name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ : Optional[int] =name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
__magic_name__ : Dict =name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
__magic_name__ : Optional[int] =name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
__magic_name__ : Any =name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
__magic_name__ : Optional[Any] =name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
__magic_name__ : Optional[int] =name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
__magic_name__ : List[Any] =name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
__magic_name__ : Optional[int] =name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ : Union[str, Any] =name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
__magic_name__ : List[Any] =name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
__magic_name__ : Union[str, Any] =name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
__magic_name__ : Union[str, Any] =name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
__magic_name__ : int =name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy().keys():
__magic_name__ : Dict =orig_state_dict.pop(lowerCamelCase )
if "attn.in_proj" in key:
__magic_name__ : Any =key.split(""".""" )
if key.startswith("""visual""" ):
__magic_name__ : Tuple =key_split[3]
__magic_name__ : Dict =config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ : Optional[Any] =val[
:dim, :
]
__magic_name__ : Optional[Any] =val[
dim : dim * 2, :
]
__magic_name__ : Any =val[
-dim:, :
]
else:
__magic_name__ : Union[str, Any] =val[
:dim
]
__magic_name__ : Union[str, Any] =val[
dim : dim * 2
]
__magic_name__ : int =val[
-dim:
]
else:
if "weight" in key:
__magic_name__ : str =val[
:dim, :
]
__magic_name__ : Any =val[
dim : dim * 2, :
]
__magic_name__ : Any =val[
-dim:, :
]
else:
__magic_name__ : Any =val[:dim]
__magic_name__ : List[str] =val[
dim : dim * 2
]
__magic_name__ : Any =val[-dim:]
elif key.startswith("""mit""" ):
__magic_name__ : Dict =key_split[2]
__magic_name__ : str =config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ : Optional[int] =val[:dim, :]
__magic_name__ : Union[str, Any] =val[dim : dim * 2, :]
__magic_name__ : int =val[-dim:, :]
else:
__magic_name__ : List[Any] =val[:dim]
__magic_name__ : Tuple =val[dim : dim * 2]
__magic_name__ : Any =val[-dim:]
else:
__magic_name__ : Union[str, Any] =key_split[2]
__magic_name__ : int =config.text_config.hidden_size
if "weight" in key:
__magic_name__ : List[str] =val[:dim, :]
__magic_name__ : List[str] =val[
dim : dim * 2, :
]
__magic_name__ : List[Any] =val[-dim:, :]
else:
__magic_name__ : Optional[Any] =val[:dim]
__magic_name__ : str =val[
dim : dim * 2
]
__magic_name__ : Optional[int] =val[-dim:]
else:
__magic_name__ : Optional[int] =rename_key(lowerCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ : Any =val.T
__magic_name__ : Optional[Any] =val
return orig_state_dict
def prepare_video(num_frames):
if num_frames == 8:
__magic_name__ : Union[str, Any] ="""eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
__magic_name__ : Union[str, Any] ="""eating_spaghetti.npy"""
elif num_frames == 32:
__magic_name__ : Union[str, Any] ="""eating_spaghetti_32_frames.npy"""
__magic_name__ : str =hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowerCamelCase , repo_type="""dataset""" , )
__magic_name__ : Union[str, Any] =np.load(lowerCamelCase )
return list(lowerCamelCase )
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
__magic_name__ : Tuple ={
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
__magic_name__ : List[str] =model_to_url[model_name]
__magic_name__ : List[Any] =8
if "16-frames" in model_name:
__magic_name__ : Any =16
elif "shot" in model_name:
__magic_name__ : Optional[int] =32
__magic_name__ : Union[str, Any] =get_xclip_config(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =XCLIPModel(lowerCamelCase )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ : Optional[int] ="""pytorch_model.bin"""
gdown.cached_download(lowerCamelCase , lowerCamelCase , quiet=lowerCamelCase )
__magic_name__ : Optional[int] =torch.load(lowerCamelCase , map_location="""cpu""" )["""model"""]
else:
__magic_name__ : Optional[Any] =torch.hub.load_state_dict_from_url(lowerCamelCase )["""model"""]
__magic_name__ : Optional[Any] =convert_state_dict(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =XCLIPModel(lowerCamelCase )
__magic_name__ , __magic_name__ : Dict =model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ : str =336 if model_name == """xclip-large-patch14-16-frames""" else 224
__magic_name__ : Union[str, Any] =VideoMAEImageProcessor(size=lowerCamelCase )
__magic_name__ : Any =CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
__magic_name__ : List[str] =CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
__magic_name__ : Tuple =XCLIPProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
__magic_name__ : str =prepare_video(lowerCamelCase )
__magic_name__ : List[Any] =processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowerCamelCase , return_tensors="""pt""" , padding=lowerCamelCase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ : Any =model(**lowerCamelCase )
# Verify outputs
__magic_name__ : Dict =outputs.logits_per_video
__magic_name__ : Optional[Any] =logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowerCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ : Union[str, Any] =torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ : Tuple =torch.tensor([[7.0_999E-04, 9.9_883E-01, 4.5_580E-04]] )
elif model_name == "xclip-base-patch16":
__magic_name__ : str =torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ : List[Any] =torch.tensor([[7.6_937E-04, 9.9_728E-01, 1.9_473E-03]] )
elif model_name == "xclip-large-patch14":
__magic_name__ : Optional[int] =torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Union[str, Any] =torch.tensor([[3.3_877E-04, 9.9_937E-01, 2.8_888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ : Any =torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ : str =torch.tensor([[3.8_554E-04, 9.9_929E-01, 3.2_754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ : Dict =torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ : str =torch.tensor([[7.1_890E-06, 9.9_994E-01, 5.6_559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ : Tuple =torch.tensor([[1.0_320E-05, 9.9_993E-01, 6.2_435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ : List[str] =torch.tensor([[4.1_377E-06, 9.9_990E-01, 9.8_386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ : List[str] =torch.tensor([[4.1_347E-05, 9.9_962E-01, 3.3_411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ : Optional[Any] =torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ : int =torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ : Optional[Any] =torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ : Any =torch.tensor([[9.8_219E-04, 9.9_593E-01, 3.0_863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ : int =torch.tensor([[3.5_082E-04, 9.9_785E-01, 1.7_966E-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowerCamelCase , organization="""nielsr""" )
processor.push_to_hub(lowerCamelCase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowerCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
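# Typical invocation (the script file name is illustrative; the flags match
# the argparse definition above). The script downloads the matching research
# checkpoint, remaps its state dict onto XCLIPModel, sanity-checks the logits
# on the spaghetti-eating clip, and only then saves or pushes the weights:
#
#     python convert_x_clip_original_pytorch_to_hf.py \
#         --model_name xclip-base-patch32 \
#         --pytorch_dump_folder_path ./xclip-base-patch32 \
#         --push_to_hub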
| 21
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_0_9_6,
'allenai/longformer-large-4096': 4_0_9_6,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase__( ):
snake_case__ : int = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
snake_case__ : Dict = bs[:]
snake_case__ : str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A )
cs.append(2**8 + n )
n += 1
snake_case__ : List[Any] = [chr(A ) for n in cs]
return dict(zip(A , A ) )
def lowercase__( A ):
snake_case__ : str = set()
snake_case__ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[str] = char
return pairs
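# Editor's sketch (illustrative only, not part of the original file): what the
# pair-extraction helper above yields for one word. The BPE loop below
# repeatedly takes the lowest-ranked pair from this set and merges it.
def _demo_get_pairs(word=("l", "o", "w", "e", "r")):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs  # {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}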
class snake_case__ ( UpperCamelCase_ ):
_lowerCAmelCase =VOCAB_FILES_NAMES
_lowerCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase =['input_ids', 'attention_mask']
def __init__( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : str="replace" , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : List[str]="</s>" , _lowerCamelCase : Optional[int]="</s>" , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : Union[str, Any]="<unk>" , _lowerCamelCase : Any="<pad>" , _lowerCamelCase : str="<mask>" , _lowerCamelCase : Tuple=False , **_lowerCamelCase : str , ):
snake_case__ : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else bos_token
snake_case__ : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else eos_token
snake_case__ : List[str] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else sep_token
snake_case__ : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else cls_token
snake_case__ : Any = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else unk_token
snake_case__ : Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
snake_case__ : str = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
super().__init__(
errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , **_lowerCamelCase , )
with open(_lowerCamelCase , encoding='utf-8' ) as vocab_handle:
snake_case__ : Union[str, Any] = json.load(_lowerCamelCase )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : List[Any] = errors # how to handle errors in decoding
snake_case__ : int = bytes_to_unicode()
snake_case__ : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCamelCase , encoding='utf-8' ) as merges_handle:
snake_case__ : Optional[int] = merges_handle.read().split('\n' )[1:-1]
snake_case__ : Any = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Optional[Any] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCAmelCase__ ( self : Any ):
return len(self.encoder )
def UpperCAmelCase__ ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self : List[Any] , _lowerCamelCase : Dict ):
if token in self.cache:
return self.cache[token]
snake_case__ : Tuple = tuple(_lowerCamelCase )
snake_case__ : List[str] = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
snake_case__ : Dict = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ : List[Any] = bigram
snake_case__ : Optional[Any] = []
snake_case__ : str = 0
while i < len(_lowerCamelCase ):
try:
snake_case__ : List[str] = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : List[str] = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : Tuple = tuple(_lowerCamelCase )
snake_case__ : Dict = new_word
if len(_lowerCamelCase ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(_lowerCamelCase )
snake_case__ : Optional[int] = ' '.join(_lowerCamelCase )
snake_case__ : Tuple = word
return word
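    # Editor's note (worked instance of the merge loop above): with ranks
    # {('e', 'r'): 0, ('l', 'o'): 1} and word ('l', 'o', 'w', 'e', 'r'), the
    # lowest-ranked pair ('e', 'r') merges first -> ('l', 'o', 'w', 'er'),
    # then ('l', 'o') -> ('lo', 'w', 'er'); no ranked pair remains, so the
    # cached result is the string "lo w er".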
def UpperCAmelCase__ ( self : int , _lowerCamelCase : int ):
snake_case__ : Union[str, Any] = []
for token in re.findall(self.pat , _lowerCamelCase ):
snake_case__ : Dict = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(' ' ) )
return bpe_tokens
def UpperCAmelCase__ ( self : List[Any] , _lowerCamelCase : int ):
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self : List[str] , _lowerCamelCase : List[str] ):
return self.decoder.get(_lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
snake_case__ : Dict = ''.join(_lowerCamelCase )
snake_case__ : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def UpperCAmelCase__ ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
snake_case__ : Tuple = os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase ) + '\n' )
snake_case__ : List[Any] = 0
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
snake_case__ : str = token_index
writer.write(' '.join(_lowerCamelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Dict = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def UpperCAmelCase__ ( self : Optional[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ):
snake_case__ : int = [self.sep_token_id]
snake_case__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int=False , **_lowerCamelCase : List[str] ):
snake_case__ : Tuple = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()):
snake_case__ : str = ' ' + text
return (text, kwargs)
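# Editor's sketch (illustrative; assumes this class corresponds to
# transformers' LongformerTokenizer and that the "allenai/longformer-base-4096"
# checkpoint listed above is reachable):
if __name__ == "__main__":
    from transformers import LongformerTokenizer

    tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    enc = tok("Hello world")
    print(enc["input_ids"])  # ids wrapped in <s> ... </s>
    print(tok.convert_ids_to_tokens(enc["input_ids"]))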
| 170
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = """megatron-bert"""
def __init__( self , __lowercase=29_056 , __lowercase=1_024 , __lowercase=24 , __lowercase=16 , __lowercase=4_096 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=2 , __lowercase=0.02 , __lowercase=1E-1_2 , __lowercase=0 , __lowercase="absolute" , __lowercase=True , **__lowercase , ) -> Dict:
super().__init__(pad_token_id=__lowercase , **__lowercase)
__UpperCamelCase :Optional[Any] = vocab_size
__UpperCamelCase :List[Any] = hidden_size
__UpperCamelCase :Optional[Any] = num_hidden_layers
__UpperCamelCase :Tuple = num_attention_heads
__UpperCamelCase :Tuple = hidden_act
__UpperCamelCase :Optional[int] = intermediate_size
__UpperCamelCase :Dict = hidden_dropout_prob
__UpperCamelCase :List[Any] = attention_probs_dropout_prob
__UpperCamelCase :Tuple = max_position_embeddings
__UpperCamelCase :List[str] = type_vocab_size
__UpperCamelCase :Dict = initializer_range
__UpperCamelCase :Optional[int] = layer_norm_eps
__UpperCamelCase :List[Any] = position_embedding_type
__UpperCamelCase :Union[str, Any] = use_cache
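# Editor's sketch: the placeholder assignments above stand in for
# `self.<name> = <name>` writes; against the matching upstream class the
# config is used like this (printed values are the signature defaults above):
if __name__ == "__main__":
    from transformers import MegatronBertConfig

    cfg = MegatronBertConfig(num_hidden_layers=12)
    print(cfg.hidden_size, cfg.num_attention_heads, cfg.num_hidden_layers)  # 1024 16 12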
| 452
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=False , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=19 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> Optional[Any]:
__UpperCamelCase :Dict = parent
__UpperCamelCase :Optional[Any] = batch_size
__UpperCamelCase :Any = seq_length
__UpperCamelCase :List[str] = is_training
__UpperCamelCase :Any = use_input_mask
__UpperCamelCase :Optional[int] = use_token_type_ids
__UpperCamelCase :List[str] = use_labels
__UpperCamelCase :Tuple = vocab_size
__UpperCamelCase :List[Any] = hidden_size
__UpperCamelCase :Optional[Any] = num_hidden_layers
__UpperCamelCase :List[Any] = num_attention_heads
__UpperCamelCase :Dict = intermediate_size
__UpperCamelCase :List[str] = hidden_act
__UpperCamelCase :Any = hidden_dropout_prob
__UpperCamelCase :Union[str, Any] = attention_probs_dropout_prob
__UpperCamelCase :Optional[Any] = max_position_embeddings
__UpperCamelCase :List[Any] = type_vocab_size
__UpperCamelCase :int = type_sequence_label_size
__UpperCamelCase :str = initializer_range
__UpperCamelCase :Optional[Any] = num_labels
__UpperCamelCase :int = num_choices
__UpperCamelCase :Optional[Any] = scope
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :int = None
if self.use_input_mask:
__UpperCamelCase :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Dict = None
__UpperCamelCase :List[Any] = None
__UpperCamelCase :Tuple = None
if self.use_labels:
__UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :str = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :int = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__lowercase , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :int = EsmForProteinFolding(config=__lowercase).float()
model.to(__lowercase)
model.eval()
__UpperCamelCase :Tuple = model(__lowercase , attention_mask=__lowercase)
__UpperCamelCase :Any = model(__lowercase)
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3))
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Dict = self.prepare_config_and_inputs()
(
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
__UpperCamelCase,
) = config_and_inputs
__UpperCamelCase :Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : Optional[int] = False
a__ : Optional[int] = (EsmForProteinFolding,) if is_torch_available() else ()
a__ : str = ()
a__ : Tuple = {} if is_torch_available() else {}
a__ : List[Any] = False
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Dict = EsmFoldModelTester(self)
__UpperCamelCase :Dict = ConfigTester(self , config_class=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
@unittest.skip('''Does not support attention outputs''')
def UpperCamelCase__ ( self) -> Any:
pass
@unittest.skip
def UpperCamelCase__ ( self) -> Any:
pass
@unittest.skip('''Esm does not support embedding resizing''')
def UpperCamelCase__ ( self) -> Union[str, Any]:
pass
@unittest.skip('''Esm does not support embedding resizing''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support passing input embeds!''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold does not support head pruning.''')
def UpperCamelCase__ ( self) -> Optional[int]:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''')
def UpperCamelCase__ ( self) -> str:
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip('''ESMFold only has one output format.''')
def UpperCamelCase__ ( self) -> Union[str, Any]:
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold does not support input chunking.''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''')
def UpperCamelCase__ ( self) -> int:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCamelCase__ ( self) -> str:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''')
def UpperCamelCase__ ( self) -> List[Any]:
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''')
def UpperCamelCase__ ( self) -> Any:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@require_torch
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[Any] = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''').float()
model.eval()
__UpperCamelCase :Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
__UpperCamelCase :List[Any] = model(__lowercase)['''positions''']
__UpperCamelCase :Optional[int] = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.floataa)
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __lowercase , atol=1E-4))
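# Editor's sketch (commented out; mirrors the slow integration test above,
# which needs the large facebook/esmfold_v1 checkpoint and substantial memory):
# model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
# outputs = model(torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]))
# outputs["positions"].shape  # (8, batch, seq_len, 14, 3) atom coordinates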
| 452
| 1
|
from __future__ import annotations
import math
def _lowerCamelCase ( __A : int ) -> int:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
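# Editor's note (worked instance of the 6k +/- 1 scan above): for 97 the loop
# is range(5, int(sqrt(97) + 1), 6) == [5], so only 97 % 5 and 97 % 7 are
# tested before the function returns True.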
SCREAMING_SNAKE_CASE = [num for num in range(3, 100001, 2) if not is_prime(num)]
def _lowerCamelCase ( __A : int ) -> Optional[Any]:
if not isinstance(__A , __A ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
_UpperCAmelCase : Optional[int] = []
for num in range(len(__A ) ):
_UpperCAmelCase : Tuple = 0
while 2 * i * i <= odd_composites[num]:
_UpperCAmelCase : Union[str, Any] = odd_composites[num] - 2 * i * i
if is_prime(__A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__A ) == n:
return list_nums
return []
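# Editor's sketch (illustrative; `is_prime` is the helper defined at the top of
# this file): a direct check of one odd composite. 33 = 31 + 2 * 1**2, so 33 is
# not a counterexample; the search above ends at the first odd composite with
# no such decomposition, the well-known answer 5777.
def _demo_goldbach_check(number=33):
    i = 1
    while 2 * i * i <= number:
        if is_prime(number - 2 * i * i):
            return (number - 2 * i * i, i)  # (31, 1) for 33
        i += 1
    return None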
def _lowerCamelCase ( ) -> Dict:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 485
|
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ):
if len(_UpperCAmelCase ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_lowerCAmelCase :list[float] = list(_UpperCAmelCase )
_lowerCAmelCase :Optional[Any] = degree
def __add__( self: str , _UpperCAmelCase: Polynomial ):
if self.degree > polynomial_a.degree:
_lowerCAmelCase :Any = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _UpperCAmelCase )
else:
_lowerCAmelCase :List[Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _UpperCAmelCase )
def __sub__( self: str , _UpperCAmelCase: Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self: Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self: int , _UpperCAmelCase: Polynomial ):
_lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ):
_lowerCAmelCase :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self: Union[str, Any] ):
_lowerCAmelCase :Dict = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCAmelCase )
return polynomial
def __repr__( self: Optional[Any] ):
return self.__str__()
def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
_lowerCAmelCase :list[float] = [0] * self.degree
for i in range(self.degree ):
_lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ):
_lowerCAmelCase :list[float] = [0] * (self.degree + 2)
_lowerCAmelCase :str = constant
for i in range(self.degree + 1 ):
_lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _UpperCAmelCase )
def __eq__( self: List[Any] , _UpperCAmelCase: object ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self: Optional[Any] , _UpperCAmelCase: object ):
return not self.__eq__(_UpperCAmelCase )
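# Editor's sketch (illustrative; assumes the placeholder assignments above
# stand in for the original `self.coefficients` / `self.degree` writes; the
# class name is the one defined above):
if __name__ == "__main__":
    p = UpperCAmelCase_(2, [1, 2, 3])  # 3x^2 + 2x + 1 (coefficients[i] is the x^i term)
    q = UpperCAmelCase_(1, [0, 1])     # x
    print(p + q)  # 3x^2 + 3x + 1
    print(p * q)  # 3x^3 + 2x^2 + 1x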
| 687
| 0
|
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
a = 100
a = set(range(3, NUM_PRIMES, 2))
primes.add(2)
a = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def a_ ( __UpperCAmelCase ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
snake_case: set[int] =set()
snake_case: int
snake_case: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def a_ ( __UpperCAmelCase = 50_00 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1 , __UpperCAmelCase ):
if len(partition(__UpperCAmelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
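# Editor's note (worked instance of the product encoding above): partitions of
# 10 into primes map to distinct products by unique factorisation:
#   2+2+2+2+2 -> 32, 2+2+3+3 -> 36, 2+3+5 -> 30, 3+7 -> 21, 5+5 -> 25
# so len(partition(10)) == 5. For the default threshold of 5000 the published
# Project Euler 77 answer is 71.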
| 347
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( snake_case , unittest.TestCase ):
UpperCAmelCase : Any = GPTaTokenizer
UpperCAmelCase : Union[str, Any] = GPTaTokenizerFast
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = {"""add_prefix_space""": True}
UpperCAmelCase : str = False
def UpperCamelCase ( self : List[str] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case: Union[str, Any] =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
snake_case: Optional[int] =dict(zip(a_ , range(len(a_ ) ) ) )
snake_case: Optional[Any] =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
snake_case: Dict ={'unk_token': '<unk>'}
snake_case: List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case: Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a_ ) )
def UpperCamelCase ( self : Optional[Any] , **a_ : List[str] ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a_ )
def UpperCamelCase ( self : Dict , **a_ : Any ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def UpperCamelCase ( self : List[str] , a_ : List[Any] ) -> Union[str, Any]:
snake_case: Any ='lower newer'
snake_case: Tuple ='lower newer'
return input_text, output_text
def UpperCamelCase ( self : int ) -> Any:
snake_case: Any =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case: int ='lower newer'
snake_case: str =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
snake_case: Optional[Any] =tokenizer.tokenize(a_ , add_prefix_space=a_ )
self.assertListEqual(a_ , a_ )
snake_case: Optional[int] =tokens + [tokenizer.unk_token]
snake_case: List[Any] =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def UpperCamelCase ( self : Any ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
snake_case: Tuple =self.get_tokenizer()
snake_case: List[Any] =self.get_rust_tokenizer(add_prefix_space=a_ )
snake_case: Any ='lower newer'
# Testing tokenization
snake_case: Optional[Any] =tokenizer.tokenize(a_ , add_prefix_space=a_ )
snake_case: Union[str, Any] =rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids without special tokens
snake_case: Dict =tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case: Any =rust_tokenizer.encode(a_ , add_special_tokens=a_ )
self.assertListEqual(a_ , a_ )
# Testing conversion to ids with special tokens
snake_case: str =self.get_rust_tokenizer(add_prefix_space=a_ )
snake_case: Dict =tokenizer.encode(a_ , add_prefix_space=a_ )
snake_case: Dict =rust_tokenizer.encode(a_ )
self.assertListEqual(a_ , a_ )
# Testing the unknown token
snake_case: List[str] =tokens + [rust_tokenizer.unk_token]
snake_case: Optional[int] =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def UpperCamelCase ( self : List[str] , *a_ : Tuple , **a_ : Tuple ) -> Any:
# It's very difficult to mix/test pretokenization with byte-level BPE
# and get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCamelCase ( self : Dict , a_ : List[Any]=1_5 ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case: Dict =self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
# Simple input
snake_case: List[str] ='This is a simple input'
snake_case: Optional[int] =['This is a simple input 1', 'This is a simple input 2']
snake_case: Dict =('This is a simple input', 'This is a pair')
snake_case: Union[str, Any] =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='max_length' )
# Simple input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='max_length' )
# Simple input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='max_length' , )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode , a_ , max_length=a_ , padding='max_length' )
# Pair input
self.assertRaises(a_ , tokenizer_r.encode_plus , a_ , max_length=a_ , padding='max_length' )
# Pair input
self.assertRaises(
a_ , tokenizer_r.batch_encode_plus , a_ , max_length=a_ , padding='max_length' , )
def UpperCamelCase ( self : Tuple ) -> List[Any]:
snake_case: Optional[Any] =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
snake_case: List[Any] ='This is a simple input'
snake_case: Tuple =['This is a simple input looooooooong', 'This is a simple input']
snake_case: Union[str, Any] =('This is a simple input', 'This is a pair')
snake_case: List[Any] =[
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
snake_case: Any =tokenizer.pad_token_id
snake_case: List[str] =tokenizer(a_ , padding='max_length' , max_length=3_0 , return_tensors='np' )
snake_case: Dict =tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='np' )
snake_case: Tuple =tokenizer(*a_ , padding='max_length' , max_length=6_0 , return_tensors='np' )
snake_case: Dict =tokenizer(a_ , padding=a_ , truncate=a_ , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCamelCase ( self : str ) -> Optional[Any]:
snake_case: Tuple ='$$$'
snake_case: Any =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a_ , add_bos_token=a_ )
snake_case: Optional[Any] ='This is a simple input'
snake_case: Any =['This is a simple input 1', 'This is a simple input 2']
snake_case: Any =tokenizer.bos_token_id
snake_case: Dict =tokenizer(a_ )
snake_case: int =tokenizer(a_ )
self.assertEqual(out_s.input_ids[0] , a_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case: Optional[int] =tokenizer.decode(out_s.input_ids )
snake_case: Optional[Any] =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def UpperCamelCase ( self : Optional[int] ) -> Tuple:
pass
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
snake_case: int =[self.get_tokenizer(do_lower_case=a_ , add_bos_token=a_ )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case: List[str] ='Encode this.'
snake_case: List[Any] ='This one too please.'
snake_case: Union[str, Any] =tokenizer.encode(a_ , add_special_tokens=a_ )
encoded_sequence += tokenizer.encode(a_ , add_special_tokens=a_ )
snake_case: Any =tokenizer.encode_plus(
a_ , a_ , add_special_tokens=a_ , return_special_tokens_mask=a_ , )
snake_case: Dict =encoded_sequence_dict['input_ids']
snake_case: str =encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(a_ ) , len(a_ ) )
snake_case: Dict =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a_ )
]
snake_case: int =[x for x in filtered_sequence if x is not None]
self.assertEqual(a_ , a_ )
@require_tokenizers
class a_ ( unittest.TestCase ):
def UpperCamelCase ( self : Dict ) -> Optional[int]:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
snake_case: Optional[int] =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=a_ )
snake_case: List[Any] ='A photo of a cat'
snake_case: List[Any] =tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('test_opt' )
snake_case: Union[str, Any] =AutoTokenizer.from_pretrained('./test_opt' )
snake_case: Union[str, Any] =tokenizer.encode(
a_ , )
self.assertEqual(a_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def UpperCamelCase ( self : Any ) -> Tuple:
snake_case: Optional[Any] =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=a_ )
snake_case: List[str] ='A photo of a cat'
snake_case: Optional[int] =tokenizer.encode(
a_ , )
# Same as above
self.assertEqual(a_ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
snake_case: Dict =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=a_ )
snake_case: Dict ='bos'
snake_case: Union[str, Any] =tokenizer.get_vocab()['bos']
snake_case: Tuple ='A photo of a cat'
snake_case: str =tokenizer.encode(
a_ , )
# We changed the bos token
self.assertEqual(a_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained('./tok' )
snake_case: List[str] =AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
snake_case: Dict =tokenizer.encode(
a_ , )
self.assertEqual(a_ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
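# Editor's sketch (illustrative; standard transformers GPT2Tokenizer API, as
# exercised by the tests above):
if __name__ == "__main__":
    from transformers import GPT2Tokenizer

    tok = GPT2Tokenizer.from_pretrained("gpt2")
    print(tok.tokenize("lower newer"))                         # no marker on the first word
    print(tok.tokenize("lower newer", add_prefix_space=True))  # first word gains the "\u0120" space marker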
| 347
| 1
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
a_ : Tuple = trt.Logger(trt.Logger.WARNING)
a_ : int = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
a_ : Tuple = logging.getLogger(__name__)
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
a_ : Tuple = parser.parse_args()
if args.tokenizer_name:
a_ : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
a_ : Optional[int] = args.per_device_eval_batch_size
a_ : Dict = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
a_ : List[str] = True
a_ : List[str] = 'temp_engine/bert-fp32.engine'
if args.fpaa:
a_ : Optional[Any] = 'temp_engine/bert-fp16.engine'
if args.inta:
a_ : Optional[int] = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
a_ : str = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
a_ : Tuple = [network.get_input(i) for i in range(network.num_inputs)]
a_ : str = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
a_ : Dict = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
a_ : int = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
a_ : List[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = np.asarray(inputs["input_ids"] , dtype=np.intaa )
lowerCamelCase = np.asarray(inputs["attention_mask"] , dtype=np.intaa )
lowerCamelCase = np.asarray(inputs["token_type_ids"] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , UpperCAmelCase__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , UpperCAmelCase__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , UpperCAmelCase__ )
# start time
lowerCamelCase = time.time()
# Run inference
context.execute_async(
bindings=[int(UpperCAmelCase__ ) for d_inp in d_inputs] + [int(UpperCAmelCase__ ), int(UpperCAmelCase__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
cuda.memcpy_dtoh_async(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCamelCase = time.time()
lowerCamelCase = end_time - start_time
lowerCamelCase = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
a_ : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a_ : Dict = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
a_ : Tuple = raw_datasets['validation'].column_names
a_ : Any = 'question' if 'question' in column_names else column_names[0]
a_ : List[Any] = 'context' if 'context' in column_names else column_names[1]
a_ : str = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
a_ : Any = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
a_ : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
lowerCamelCase = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit with the context of the previous feature.
lowerCamelCase = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=UpperCAmelCase__ , stride=args.doc_stride , return_overflowing_tokens=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , padding="max_length" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCamelCase = tokenized_examples.pop("overflow_to_sample_mapping" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCamelCase = []
for i in range(len(tokenized_examples["input_ids"] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCamelCase = tokenized_examples.sequence_ids(UpperCAmelCase__ )
lowerCamelCase = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCamelCase = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCamelCase = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
]
return tokenized_examples
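# Editor's note (hypothetical values, for illustration): with doc_stride 128
# and max_seq_length 384, one long example spills into several overlapping
# features, so overflow_to_sample_mapping can look like [0, 0, 0, 1, 2, 2]
# (example 0 produced three spans). The None-masked offset_mapping built above
# is what lets postprocessing map start/end logits back to character positions
# in the original context only.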
a_ : Tuple = raw_datasets['validation']
# Validation Feature Creation
a_ : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
a_ : Union[str, Any] = default_data_collator
a_ : int = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
a_ : Dict = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__="eval" ):
"""simple docstring"""
lowerCamelCase = postprocess_qa_predictions(
examples=UpperCAmelCase__ , features=UpperCAmelCase__ , predictions=UpperCAmelCase__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=UpperCAmelCase__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCamelCase = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
lowerCamelCase = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
lowerCamelCase = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=UpperCAmelCase__ , label_ids=UpperCAmelCase__ )
a_ : str = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
return trt.volume(engine.get_binding_shape(UpperCAmelCase__ ) ) * engine.get_binding_dtype(UpperCAmelCase__ ).itemsize
# Allocate device memory for inputs and outputs.
a_ : str = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
a_ : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
a_ : Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
a_ : Dict = cuda.mem_alloc(h_outputa.nbytes)
a_ : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
a_ : List[Any] = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
a_ : Optional[int] = 0.0
a_ : Dict = 0
a_ : List[Any] = timeit.default_timer()
a_ : Optional[Any] = None
for step, batch in enumerate(eval_dataloader):
a_ , a_ : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
a_ , a_ : Dict = outputs
a_ : Dict = torch.tensor(start_logits)
a_ : int = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
a_ : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
a_ : Union[str, Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
a_ : Dict = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
a_ : Optional[int] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
a_ : Any = nested_truncate(all_preds, len(eval_dataset))
a_ : Optional[int] = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
a_ : Dict = post_processing_function(eval_examples, eval_dataset, all_preds)
a_ : Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 623
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a (self ):
'''simple docstring'''
lowerCamelCase = 1
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _a (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _a (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _a (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
return CLIPTextModel(__a )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet_upscale
lowerCamelCase = DDPMScheduler()
lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , )
lowerCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = "A painting of a squirrel eating a burger"
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
lowerCamelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCamelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet_upscale
lowerCamelCase = DDPMScheduler()
lowerCamelCase = DDIMScheduler(prediction_type="v_prediction" )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCamelCase = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=3_50 , )
lowerCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = "A painting of a squirrel eating a burger"
lowerCamelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase = output.images
assert image.shape[0] == 2
lowerCamelCase = torch.Generator(device=__a ).manual_seed(0 )
lowerCamelCase = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase = output.images
assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        """Test that stable diffusion upscale works with fp16"""
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 drifts further from the fp32 reference, hence the loose tolerance
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
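# A quick sanity check (illustrative values only): uniform logits give the
# maximum possible entropy, log(num_classes).
#   entropy(torch.zeros(1, 4))   # -> tensor([1.3863]), i.e. log(4)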
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # accept either a scalar threshold for every layer, or one value per layer
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        # copy the main model's pooler weights into every highway pooler
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
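# Early exit works by raising an exception out of the layer loop: at inference
# time, once a highway head's prediction entropy falls below the per-layer
# threshold, HighwayException carries the highway logits upward, and
# DeeBertForSequenceClassification.forward catches it below, so the remaining
# layers are never run.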
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ", BERT_START_DOCSTRING, )
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from an intermediate BertLayer's output to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ", BERT_START_DOCSTRING, )
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # an early exit fired inside the encoder; its payload replaces the outputs
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
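# Input format (a sketch inferred from the parsing above): one undirected edge
# per line, written as "<node> <node> <distance>", e.g.
#   a b 20
#   a c 18
#   b c 10
# which yields generate_neighbours(...)["a"] == [["b", "20"], ["c", "18"]].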
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour, starting from the node named by the file's first character."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel, assumed larger than any edge weight
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all two-node-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
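# Illustrative wiring (file name hypothetical):
#   neighbours = generate_neighbours("graph.txt")
#   first, dist = generate_first_solution("graph.txt", neighbours)
#   best, cost = tabu_search(first, dist, neighbours, iters=50, size=5)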
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
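# A sketch of the vocab file format (an assumption read off the parsing above):
# each line is one token, or a comma-separated group of surface forms sharing a
# single id, e.g.
#   こんにちは,こんにちわ
# Both spellings on that line would map to the same id in `vocab`.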
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs, ):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            # single characters whose 2-byte UTF-8 encoding falls into symbol ranges
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            # single characters in U+2000..U+2BFF (3-byte UTF-8 encodings)
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
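# A minimal round-trip sketch (repo id taken from the pretrained map above):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは")["input_ids"]
#   tokenizer.decode(ids)   # back to "こんにちは", modulo cleanup rules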
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
def present_value(discount_rate, cash_flows):
    """Net present value of a series of cash flows, with the first flow at t=0."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
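# Quick checks (illustrative numbers only):
#   present_value(0.0, [100, 100])   # -> 200.0 (no discounting)
#   present_value(0.1, [0, 110])     # -> 100.0 (110 / 1.1, one period out)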
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # an empty vector should also construct

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)), )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
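# Sanity sketch (illustrative numbers): a 1200x800 input with a "detection"
# checkpoint gives scale = 800/1200, i.e. a resize to 800x533 before the
# ImageNet mean/std normalization above.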
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
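# Example invocation (script file name assumed from the usual repo layout):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./table-transformer-detection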
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self, dataset_name, config, version, cache_dir=None, use_local_dummy_data=False, load_existing_dummy_data=True, download_callbacks=None, ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
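# --- Usage sketch (not part of the original file; names are assumptions) ---
# Assuming this class is datasets' MockDownloadManager (the constructor signature
# below is an assumption based on typical usage), a dataset test would resolve
# dummy-data paths instead of performing real downloads roughly like this:
#
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#   paths = dl_manager.download_and_extract(
#       {"train": "https://example.com/train.json", "dev": "https://example.com/dev.json"}
#   )
#   # -> {"train": "<path_to_dummy_data>/train.json", "dev": "<path_to_dummy_data>/dev.json"}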
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
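# Worked example of the checkpoint-key fixup performed in `_load_model` below:
#   "_orig_mod.transformer.h.0.attn.c_attn.weight"
# first loses the "_orig_mod." prefix, then the substring replacements above
# turn it into the HF key
#   "layers.0.attn.att_proj.weight"
# ("c_attn" -> "att_proj", "transformer." is dropped, "h." -> "layers.").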
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
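# e.g. _get_ckpt_path("text") resolves to "<CACHE_DIR>/text_2.pt" (the large checkpoint),
# while _get_ckpt_path("text", use_small=True) resolves to "<CACHE_DIR>/text.pt".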
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    # `.attn.bias` keys hold the static causal-mask buffer, so they are excluded
    # from the key comparison on both sides
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
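# Example invocations (the script name and output paths are illustrative):
#   python convert_suno_to_hf.py text /tmp/bark/text --is_small
#   python convert_suno_to_hf.py coarse /tmp/bark/coarse --is_small
#   python convert_suno_to_hf.py fine /tmp/bark/fine --is_small
# Note that `load_whole_bark_model` is not wired to this CLI; it is meant to be
# called separately once the three sub-models above have been converted.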