def twos_complement(number: int) -> str:
    """Return the two's complement of a negative integer as a '0b'-prefixed binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        "1" + "0" * (binary_number_length - len(twos_complement_number)) + twos_complement_number
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
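# Usage sketch for the function above (names as reconstructed here):
# -5 is 0b101 unsigned, so its two's complement over 4 bits is 0b1011.
# >>> twos_complement(-5)
# '0b1011'
# >>> twos_complement(-1)
# '0b11'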
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    # Down block with cross attention: resnet/attention pairs, then an optional downsampler.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    # Plain down block: resnets only, then an optional downsampler.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    # Up block with cross attention: each resnet also consumes a skip connection
    # popped from the down path, then an optional upsampler.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    # Plain up block: resnets over concatenated skip connections, then an optional upsampler.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    # Mid block: an initial resnet, then alternating attention/resnet pairs.
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
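# A minimal shape-check sketch for the down block above (a sketch assuming the
# reconstructed class name FlaxDownBlock2D and standard Flax initialization;
# the concrete sizes are illustrative):
# import jax
# block = FlaxDownBlock2D(in_channels=32, out_channels=64)
# sample = jnp.zeros((1, 8, 8, 32))  # NHWC, as these Flax blocks expect
# temb = jnp.zeros((1, 128))
# params = block.init(jax.random.PRNGKey(0), sample, temb)
# hidden, skips = block.apply(params, sample, temb)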
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel spectrogram images and converts them back to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps: DDIM needs far fewer steps than DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising step by step (DDIM only, since this must be deterministic)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
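# Usage sketch (the checkpoint name is illustrative; the class name is the
# reconstruction used above):
# pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to("cuda")
# output = pipe(batch_size=1)
# image = output.images[0]  # mel spectrogram as a PIL image
# sample_rate, audio = pipe.mel.get_sample_rate(), output.audios[0]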
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
    import textwrap

    from PIL import Image, ImageDraw, ImageFont

if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping (patch_height, patch_width) patches from an image tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
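# Shape sanity check for the function above (a sketch, not from the source):
# a 3x4x4 image with 2x2 patches yields shape [1, 2, 2, 12] -- a 2x2 grid of
# patches, each flattened to 2 * 2 * 3 = 12 values.
# t = torch.arange(48, dtype=torch.float32).reshape(3, 4, 4)
# torch_extract_patches(t, 2, 2).shape  # torch.Size([1, 2, 2, 12])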
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> "Image.Image":
    """Render text onto a padded image, wrapped to 80 characters per line."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    """Render a header text above an image and return the combined image."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image is covered by full patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """Normalize an image by its own mean and (clamped) standard deviation."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
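# Usage sketch (the checkpoint name is illustrative):
# from PIL import Image
# processor = Pix2StructImageProcessor.from_pretrained("google/pix2struct-base")
# inputs = processor(images=Image.open("chart.png"), return_tensors="pt")
# inputs["flattened_patches"].shape  # (1, max_patches, 2 + 16 * 16 * 3) with the defaults above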
"""simple docstring"""
UpperCamelCase = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
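# The guarded-import pattern this file repeats, in minimal form (a sketch;
# `is_foo_available`, the dummy module, and FooPipeline are illustrative
# placeholders, not real names):
# try:
#     if not is_foo_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     from .utils.dummy_foo_objects import *  # noqa F403
# else:
#     from .pipelines import FooPipeline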
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
a_ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , ):
output_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , use_external_data_format=__UpperCamelCase , enable_onnx_checker=__UpperCamelCase , opset_version=__UpperCamelCase , )
else:
export(
__UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , opset_version=__UpperCamelCase , )
@torch.no_grad()
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ):
__lowercase : Optional[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__lowercase : List[str] = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
__lowercase : str = '''cpu'''
__lowercase : Optional[Any] = Path(__UpperCamelCase )
# VAE DECODER
__lowercase : List[str] = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
__lowercase : List[Any] = vae_decoder.config.latent_channels
# forward only through the decoder part
__lowercase : Optional[int] = vae_decoder.decode
onnx_export(
__UpperCamelCase , model_args=(
torch.randn(1 , __UpperCamelCase , 25 , 25 ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=__UpperCamelCase , )
del vae_decoder
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
a_ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX')
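# Invocation sketch (script name and paths are illustrative, not from the source):
# python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14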
"""simple docstring"""
from math import isqrt
def __UpperCAmelCase ( __UpperCamelCase ):
return all(number % divisor != 0 for divisor in range(2 , isqrt(__UpperCamelCase ) + 1 ) )
def __UpperCAmelCase ( __UpperCamelCase = 10**6 ):
__lowercase : Optional[int] = 0
__lowercase : Dict = 1
__lowercase : int = 7
while prime_candidate < max_prime:
primes_count += is_prime(__UpperCamelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"{solution() = }")
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number that has n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        if len(str(f)) == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
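# Quick check (values from the Fibonacci sequence itself): the first Fibonacci
# number with 3 digits is F(12) = 144, so solution(3) returns 12.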
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
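# Example term: 585 reads the same forwards and backwards in base 10, and its
# binary form 0b1001001001 is also a palindrome, so solution() counts it.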
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
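# Usage sketch (field values are the defaults defined above):
# config = BitConfig()
# config.hidden_sizes  # [256, 512, 1024, 2048]
# config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']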
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                key = (int(row["batch_size"]), int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][key] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][key] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
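# The CSV consumed by Plot is expected to look like this (illustrative rows,
# not from the source):
# model,batch_size,sequence_length,result
# bert-base-uncased,8,128,1534
# bert-base-uncased,8,512,2687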
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
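# Run sketch (the pytest path is illustrative):
# python -m pytest tests/models/bert_generation/test_modeling_bert_generation.py -k test_model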
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def run_generate(verbose=True):
    """
    Takes input text, generates output, and then using reference calculates the BLEU scores.
    The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')
    parser.add_argument('save_path', type=str, help='where to save summaries')
    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')
    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')
    parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.')
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the beginning of src examples')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')
    parser.add_argument(
        '--info', nargs='?', type=str, const=datetime_now(),
        help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(F'''parsed the following generate kwargs: {parsed_args}''')
    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''')
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu')
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores['info'] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, 'w'))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
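# `chunks` above comes from the local `utils` module that is not shown here.
# A plausible minimal implementation (an assumption, not the verbatim helper):
from typing import Iterator


def chunks(lst: List, n: int) -> Iterator[List]:
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]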
| 399
| 1
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 618
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
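# The `_LazyModule` indirection above defers the heavy torch/sentencepiece
# imports until an attribute is first accessed. A stripped-down sketch of the
# same idea using module-level __getattr__ (PEP 562); the structure dict below
# is a toy stand-in, not the real implementation:
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):
    # Import the owning module only when one of its symbols is requested.
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")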
| 618
| 1
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
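# A compact restatement of the distributed-evaluation pattern tested above
# (a sketch, not part of the original script): `gather_for_metrics` removes the
# duplicate samples that distributed samplers pad onto the final batch, so every
# example is counted exactly once when computing metrics.
def evaluate_distributed(accelerator, model, dataloader, metric):
    model.eval()
    for batch in dataloader:
        with torch.no_grad():
            logits = model(**batch).logits
        preds = logits.argmax(dim=-1)
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()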
| 25
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    '''Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor.'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', FutureWarning, )
        return self.image_processor
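# Typical end-to-end usage of the processor above (a sketch; downloading the
# checkpoint requires network access, and the prompt/image are placeholders):
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))  # stand-in image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
# `inputs` now holds input_ids, attention_mask and pixel_values in one BatchEncoding.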
| 51
| 0
|
def solution() -> int:
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
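# A direct cross-check of the hand-rolled calendar walk above using the
# standard library (not part of the original solution):
import datetime


def solution_datetime() -> int:
    sundays = 0
    for year in range(1901, 2001):
        for month in range(1, 13):
            if datetime.date(year, month, 1).weekday() == 6:  # Monday is 0, Sunday is 6
                sundays += 1
    return sundays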
| 164
|
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 164
| 1
|
def sum_of_divisors(input_num: int) -> int:
    """Returns the sum of all proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("""Input must be an integer""")
    if input_num <= 0:
        raise ValueError("""Input must be positive""")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
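# The loop above is O(n). An O(sqrt(n)) variant that pairs each divisor d with
# n // d (a sketch, not part of the original; it agrees with sum_of_divisors):
def sum_of_divisors_fast(input_num: int) -> int:
    if input_num <= 0:
        raise ValueError("Input must be positive")
    total = 0
    d = 1
    while d * d <= input_num:
        if input_num % d == 0:
            total += d
            other = input_num // d
            if other != d:
                total += other
        d += 1
    return total - input_num  # drop n itself to keep only proper divisors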
| 429
|
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case (unittest.TestCase ):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""),
        )
return model
@property
    def dummy_unet_condition(self):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D"""), cross_attention_dim=10,
        )
return model
@property
    def dummy_vqvae_and_unet(self):
'''simple docstring'''
torch.manual_seed(0 )
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D"""), up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D"""),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("""AttnDownBlock2D""", """DownBlock2D"""), up_block_types=("""UpBlock2D""", """AttnUpBlock2D"""),
        )
return vqvae, unet
@slow
    def test_audio_diffusion(self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_lowerCAmelCase : Tuple = DDPMScheduler()
_lowerCAmelCase : str = AudioDiffusionPipeline(vqvae=_UpperCAmelCase , unet=self.dummy_unet , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase : Any = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : List[Any] = pipe(generator=_UpperCAmelCase , steps=4 )
_lowerCAmelCase : List[str] = output.audios[0]
_lowerCAmelCase : int = output.images[0]
_lowerCAmelCase : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : Union[str, Any] = pipe(generator=_UpperCAmelCase , steps=4 , return_dict=_UpperCAmelCase )
_lowerCAmelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_lowerCAmelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : Dict = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_lowerCAmelCase : List[str] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_lowerCAmelCase : Any = DDIMScheduler()
_lowerCAmelCase : List[Any] = self.dummy_vqvae_and_unet
_lowerCAmelCase : List[str] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
np.random.seed(0 )
_lowerCAmelCase : Union[str, Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_lowerCAmelCase : Optional[int] = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : Dict = pipe(raw_audio=_UpperCAmelCase , generator=_UpperCAmelCase , start_step=5 , steps=10 )
_lowerCAmelCase : Tuple = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_lowerCAmelCase : List[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : Optional[int] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_lowerCAmelCase : Union[str, Any] = self.dummy_unet_condition
_lowerCAmelCase : Optional[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_UpperCAmelCase , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
_lowerCAmelCase : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
np.random.seed(0 )
_lowerCAmelCase : Any = torch.rand((1, 1, 10) )
_lowerCAmelCase : List[Any] = pipe(generator=_UpperCAmelCase , encoding=_UpperCAmelCase )
_lowerCAmelCase : Tuple = output.images[0]
_lowerCAmelCase : Any = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : str = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __snake_case (unittest.TestCase ):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch_device
_lowerCAmelCase : List[str] = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
_lowerCAmelCase : str = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
_lowerCAmelCase : Tuple = pipe(generator=_UpperCAmelCase )
_lowerCAmelCase : Any = output.audios[0]
_lowerCAmelCase : List[str] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_lowerCAmelCase : Any = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
_lowerCAmelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
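# Note on the byte-exact assertions above: `.max() == 0` on uint8 slices only
# holds because enable_full_determinism() plus fixed torch.Generator seeds make
# the pipeline bit-reproducible on a given device; across devices or driver
# versions a looser allclose-style tolerance would normally be used instead.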
| 429
| 1
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
def __init__(self , A = "cpu" , A = "openai/clip-vit-large-patch14" ):
lowerCamelCase_ : List[Any] = device
lowerCamelCase_ : Any = CLIPTokenizerFast.from_pretrained(A )
lowerCamelCase_ : Optional[Any] = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
lowerCamelCase_ : Tuple = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
lowerCamelCase_ : str = torchvision.transforms.Normalize(self.image_mean , self.image_std )
lowerCamelCase_ : Tuple = torchvision.transforms.Resize(2_2_4 )
lowerCamelCase_ : List[Any] = torchvision.transforms.CenterCrop(2_2_4 )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = self.resize(A )
lowerCamelCase_ : Optional[int] = self.center_crop(A )
lowerCamelCase_ : Union[str, Any] = self.normalize(A )
return images
def __call__(self , A=None , A=None , **A ):
lowerCamelCase_ : Optional[int] = self.tokenizer(text=A , **A )
lowerCamelCase_ : str = self.preprocess_img(A )
lowerCamelCase_ : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class VQGAN_CLIP(nn.Module):
def __init__(self , A=1_0 , A=0.01 , A=None , A=None , A=None , A=None , A=None , A=None , A=False , A=True , A="image" , A=True , A=False , A=False , A=False , ):
super().__init__()
lowerCamelCase_ : str = None
lowerCamelCase_ : Optional[Any] = device if device else get_device()
if vqgan:
lowerCamelCase_ : Optional[int] = vqgan
else:
lowerCamelCase_ : List[Any] = load_vqgan(self.device , conf_path=A , ckpt_path=A )
self.vqgan.eval()
if clip:
lowerCamelCase_ : List[str] = clip
else:
lowerCamelCase_ : Tuple = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
lowerCamelCase_ : Optional[Any] = ProcessorGradientFlow(device=self.device )
lowerCamelCase_ : str = iterations
lowerCamelCase_ : Optional[int] = lr
lowerCamelCase_ : Union[str, Any] = log
lowerCamelCase_ : Any = make_grid
lowerCamelCase_ : Optional[Any] = return_val
lowerCamelCase_ : Any = quantize
lowerCamelCase_ : Optional[int] = self.vqgan.decoder.z_shape
def UpperCAmelCase__ (self , A=None , A=None , A=5 , A=True ):
lowerCamelCase_ : List[Any] = []
if output_path is None:
lowerCamelCase_ : Optional[Any] = '''./animation.gif'''
if input_path is None:
lowerCamelCase_ : List[Any] = self.save_path
lowerCamelCase_ : Optional[Any] = sorted(glob(input_path + '''/*''' ) )
if not len(A ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(A ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
lowerCamelCase_ : int = total_duration / len(A )
lowerCamelCase_ : List[Any] = [frame_duration] * len(A )
if extend_frames:
lowerCamelCase_ : str = 1.5
lowerCamelCase_ : str = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A , A , duration=A )
print(F"""gif saved to {output_path}""" )
def UpperCAmelCase__ (self , A=None , A=None ):
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
lowerCamelCase_ : Any = preprocess(Image.open(A ) , target_image_size=2_5_6 ).to(self.device )
lowerCamelCase_ : str = preprocess_vqgan(A )
lowerCamelCase_, *lowerCamelCase_ : List[Any] = self.vqgan.encode(A )
return z
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = self.latent.detach().requires_grad_()
lowerCamelCase_ : List[str] = base_latent + transform_vector
if self.quantize:
lowerCamelCase_, *lowerCamelCase_ : Dict = self.vqgan.quantize(A )
else:
lowerCamelCase_ : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCAmelCase__ (self , A , A , A=None ):
lowerCamelCase_ : Union[str, Any] = self.clip_preprocessor(text=A , images=A , return_tensors='''pt''' , padding=A )
lowerCamelCase_ : str = self.clip(**A )
lowerCamelCase_ : Tuple = clip_outputs.logits_per_image
if weights is not None:
lowerCamelCase_ : Optional[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Dict = self._get_clip_similarity(pos_prompts['''prompts'''] , A , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
lowerCamelCase_ : str = self._get_clip_similarity(neg_prompts['''prompts'''] , A , weights=neg_prompts['''weights'''] )
else:
lowerCamelCase_ : Tuple = torch.tensor([1] , device=self.device )
lowerCamelCase_ : Optional[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Tuple = torch.randn_like(self.latent , requires_grad=A , device=self.device )
lowerCamelCase_ : int = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
lowerCamelCase_ : str = self._add_vector(A )
lowerCamelCase_ : List[str] = loop_post_process(A )
lowerCamelCase_ : List[Any] = self._get_CLIP_loss(A , A , A )
print('''CLIP loss''' , A )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCAmelCase__ (self , A , A , A ):
wandb.init(reinit=A , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
lowerCamelCase_ : List[Any] = Image.open(A )
lowerCamelCase_ : Dict = image.resize((2_5_6, 2_5_6) )
wandb.log('''Original Image''' , wandb.Image(A ) )
def UpperCAmelCase__ (self , A ):
if not prompts:
return []
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Tuple = []
if isinstance(A , A ):
lowerCamelCase_ : Dict = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(A , (tuple, list) ):
lowerCamelCase_ : Optional[int] = prompt[0]
lowerCamelCase_ : str = float(prompt[1] )
elif ":" in prompt:
lowerCamelCase_, lowerCamelCase_ : int = prompt.split(''':''' )
lowerCamelCase_ : Optional[Any] = float(A )
else:
lowerCamelCase_ : Tuple = prompt
lowerCamelCase_ : int = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A , device=self.device ),
}
def UpperCAmelCase__ (self , A , A=None , A=None , A=True , A=False , A=True , A=True , A=None , ):
if image_path:
lowerCamelCase_ : Any = self._get_latent(A )
else:
lowerCamelCase_ : Union[str, Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(A , A , A )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase_ : Optional[int] = self.process_prompts(A )
lowerCamelCase_ : Optional[Any] = self.process_prompts(A )
if save_final and save_path is None:
lowerCamelCase_ : Any = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
lowerCamelCase_ : Any = save_path + '''_''' + get_timestamp()
os.makedirs(A )
lowerCamelCase_ : Tuple = save_path
lowerCamelCase_ : Optional[int] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(A ) )
lowerCamelCase_ : List[Any] = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A , A , A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
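# The CLIP loss used during the latent optimisation above combines prompt
# similarities as loss = -log(pos_similarity) + log(neg_similarity), so gradient
# descent raises similarity to positive prompts and lowers it to negative ones.
# A standalone sketch of that objective (illustrative, not the file's code):
import torch


def clip_guidance_loss(pos_sim: torch.Tensor, neg_sim: torch.Tensor) -> torch.Tensor:
    # Both inputs are summed similarity logits; lower loss means the image
    # matches the positive prompts more and the negative prompts less.
    return -torch.log(pos_sim) + torch.log(neg_sim)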
| 357
|
'''simple docstring'''
import os
from math import log10


def solution(data_file: str = 'base_exp.txt') -> int:
    '''Returns the 1-based line number of the base/exponent pair in the data file
    with the greatest numerical value, comparing x * log10(a) rather than a ** x.'''
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(',')))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
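# Why the log trick above works: a ** x and x * log10(a) have the same ordering
# because log10 is monotonic, and the latter never materialises numbers with
# millions of digits. Tiny demonstration using the example from the Project
# Euler 99 statement (2**11 = 2048 < 3**7 = 2187):
from math import log10 as _log10

assert 11 * _log10(2) < 7 * _log10(3)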
| 357
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    '''Construct a "fast" GPT-NeoX-20B tokenizer, backed by the tokenizers library.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by appending an EOS token after every turn."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
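# `_build_conversation_input_ids` above flattens a chat into token ids by
# appending EOS after every turn and keeping only the most recent
# `model_max_length` tokens:
#   [turn 1 tokens] <eos> [turn 2 tokens] <eos> ...  ->  tail of that sequence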
| 59
|
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Counts the hollow square laminae that can be formed using up to `limit` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'{solution() = }')
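# Sanity check of the counting formula above by direct enumeration of
# (outer_width, hole_width) pairs; it should agree with solution() for small
# tile budgets (e.g. both give 1 for limit=8, the single 3x3 lamina):
def solution_brute_force(limit: int = 1000) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest possible lamina for this outer width
        hole = outer - 2  # same parity, border at least one tile thick
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count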
| 59
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's Law: given any two of voltage, current and resistance
    (pass the unknown one as 0), return the missing value."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
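# Usage notes for ohms_law above: pass exactly one unknown as 0.
#   ohms_law(voltage=10, current=5, resistance=0)  -> {'resistance': 2.0}
#   ohms_law(voltage=0, current=2, resistance=3)   -> {'voltage': 6.0}
#   ohms_law(voltage=6, current=0, resistance=3)   -> {'current': 2.0}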
| 530
| 0
|
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 368
|
'''simple docstring'''
__author__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluates a fully parenthesised arithmetic expression of single-digit
    operands using Dijkstra's two-stack algorithm."""
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two
            # operands, apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 368
| 1
|
'''simple docstring'''
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '


DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}


def download_prompt(prompt_or_repo_id, agent_name, mode='run'):
    """Downloads and caches the prompt from a repo and returns its content (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('\\s', prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type='dataset', user_agent={'agent': agent_name})
    with open(prompt_file, 'r', encoding='utf-8') as f:
        return f.read()
| 704
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__A : Optional[int] = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''')
class VivitImageProcessor(BaseImageProcessor):
    '''Constructs a Vivit image processor.'''
    model_input_names = ['pixel_values']
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True,
                 crop_size=None, do_rescale=True, rescale_factor=1 / 255, offset=True, do_normalize=True,
                 image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ):
"""simple docstring"""
a__ = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" in size:
a__ = get_resize_output_image_size(_a , size['shortest_edge'] , default_to_square=_a )
elif "height" in size and "width" in size:
a__ = (size['height'], size['width'])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def lowercase__ ( self , _a , _a , _a = None , **_a , ):
"""simple docstring"""
a__ = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_a , size=(size['height'], size['width']) , data_format=_a , **_a )
def lowercase__ ( self , _a , _a , _a = True , _a = None , **_a , ):
"""simple docstring"""
a__ = image.astype(np.floataa )
if offset:
a__ = image - (scale / 2)
return rescale(_a , scale=_a , data_format=_a , **_a )
def lowercase__ ( self , _a , _a , _a , _a = None , **_a , ):
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def lowercase__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , ):
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
a__ = to_numpy_array(_a )
if do_resize:
a__ = self.resize(image=_a , size=_a , resample=_a )
if do_center_crop:
a__ = self.center_crop(_a , size=_a )
if do_rescale:
a__ = self.rescale(image=_a , scale=_a , offset=_a )
if do_normalize:
a__ = self.normalize(image=_a , mean=_a , std=_a )
a__ = to_channel_dimension_format(_a , _a )
return image
def lowercase__ ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
"""simple docstring"""
a__ = do_resize if do_resize is not None else self.do_resize
a__ = resample if resample is not None else self.resample
a__ = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ = offset if offset is not None else self.offset
a__ = do_normalize if do_normalize is not None else self.do_normalize
a__ = image_mean if image_mean is not None else self.image_mean
a__ = image_std if image_std is not None else self.image_std
a__ = size if size is not None else self.size
a__ = get_size_dict(_a , default_to_square=_a )
a__ = crop_size if crop_size is not None else self.crop_size
a__ = get_size_dict(_a , param_name='crop_size' )
if not valid_images(_a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
a__ = make_batched(_a )
a__ = [
[
self._preprocess_image(
image=_a , do_resize=_a , size=_a , resample=_a , do_center_crop=_a , crop_size=_a , do_rescale=_a , rescale_factor=_a , offset=_a , do_normalize=_a , image_mean=_a , image_std=_a , data_format=_a , )
for img in video
]
for video in videos
]
a__ = {'pixel_values': videos}
return BatchFeature(data=_a , tensor_type=_a )
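# What `make_batched` above normalises (shapes are illustrative):
#   a single image            -> [[image]]      (1 video with 1 frame)
#   a list of frames          -> [frames]       (1 video)
#   a list of lists of frames -> passed through (a batch of videos)
# Every video is then preprocessed frame by frame before being packed into
# the returned BatchFeature.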
| 126
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
                 use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
                 causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2,
                 initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True,
                 scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , ) -> int:
"""simple docstring"""
A__ = XLMForQuestionAnsweringSimple(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase )
A__ = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
A__ = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
A__ = XLMForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A__ = model(__lowerCAmelCase )
A__ = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , p_mask=__lowerCAmelCase , )
A__ = model(
__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , cls_index=__lowerCAmelCase , is_impossible=__lowerCAmelCase , )
((A__) , ) = result_with_labels.to_tuple()
A__ = model(__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase )
((A__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate every example along a new dimension: (batch, seq_len) -> (batch, num_choices, seq_len)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
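    # The expansion above lets one forward pass score every answer candidate: with
    # batch size B and C choices, the multiple-choice head internally flattens the
    # input to an effective batch of B * C sequences and reshapes the logits to (B, C).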
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 176
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
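# FRAMEWORK is only used as the `return_tensors` backend in the integration tests
# below, e.g. tokenizer(text, return_tensors=FRAMEWORK).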
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Collect every id whose single-token decoding round-trips cleanly.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
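    # ByT5 ids are raw UTF-8 bytes shifted by the 3 special tokens (pad=0, eos=1, unk=2),
    # so "e" (0x65 = 101) encodes to 101 + 3 = 104 and "U" (85) to 88, matching the ids
    # above; a multi-byte character like "€" (E2 82 AC) simply occupies three ids.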
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
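    # Shape check above: the longer input, "Another paragraph for summarization.", is
    # 36 bytes + 1 EOS = 37 ids, so the shorter one (35 bytes + EOS) gets a single 0
    # pad and both rows of the batch come out at length 37.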
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                # id 255 maps to a byte that is invalid on its own in UTF-8, so it decodes to ""
                self.assertTrue(tokenizer.decode([255]) == "")

    # The following overrides skip common tokenizer tests that do not apply to a
    # byte-level tokenizer without a vocabulary file.
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # ByT5 only accepts one-character strings and special added tokens as tokens,
        # so the common test is overridden with a valid token sequence.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        # A different implementation from the common test of the same name, because
        # this tokenizer has no vocab to look token strings up in.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 176
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True,
        sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2,
        vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 705
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derived sizes fall back to the hidden size when not given explicitly
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
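# Illustrative usage (not part of this module): unset derived sizes default off the
# hidden size.
#
#     config = RwkvConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2)
#     assert config.attention_hidden_size == 64
#     assert config.intermediate_size == 4 * 64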
| 375
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split (train/val/test) and save them to <output_dir>/<split>_results.json."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
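# e.g. handle_metrics("val", {"val_loss": 1.2345}, "output_dir") logs the metrics
# and writes them to output_dir/val_results.json.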
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
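# Illustrative invocation (all paths and values are placeholders, not part of the
# original script):
#
#     python finetune_trainer.py \
#         --model_name_or_path t5-small \
#         --data_dir ./data \
#         --output_dir ./output \
#         --do_train --do_eval --predict_with_generate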
| 12
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)
    def train(self, files: Union[str, List[str]], vocab_size: int = 8_000, show_progress: bool = True):
        """Train the model using the given files"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8_000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model's unk_id can only be set by round-tripping through its JSON form
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
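# Illustrative usage (file names are placeholders): train a Unigram tokenizer from
# plain-text files and persist it.
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train(files=["corpus.txt"], vocab_size=8_000)
#     tokenizer.save("tokenizer.json")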
| 267
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
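# e.g. floats_list((2, 3)) returns a 2x3 nested list of floats drawn uniformly
# from [0, scale).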
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
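    # Note: with the defaults above, prepare_inputs_for_common() yields batch_size=7
    # lists whose lengths step from min_seq_length up toward max_seq_length, which is
    # what exercises the batch-padding paths in the tests below.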
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        input_features = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        input_features_truncated = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(input_features, input_features_truncated):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
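
# --- Added reference sketch (not part of the original test file): the
# zero-mean/unit-variance normalization asserted above amounts to
# (x - mean(x)) / sqrt(var(x) + eps); the eps value here is an assumption.
def _zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + eps)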
| 718
|
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Jumps ahead in the sequence a(i+1) = a(i) + digitsum(a(i)), writing terms
    as a(i) = b * 10^k + c and caching the jumps keyed by digitsum(b) and c.
    Returns (diff, dn): the total amount added and the number of terms jumped.
    """
    # ds_b - digitsum(b); c - value of the lowest k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """Computes terms sequentially from term i up to term n, updating a_i in place."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit array in digits, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns a(n) for the sequence a(i+1) = a(i) + digitsum(a(i)), a(1) = 1."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
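
# --- Added sanity-check sketch (not part of the original solution): a
# brute-force reference for small n; `digit_sum` and `naive_a` are helper
# names introduced here, not from the original file.
def digit_sum(x: int) -> int:
    return sum(int(d) for d in str(x))


def naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += digit_sum(a)
    return a


# The sequence starts 1, 2, 4, 8, 16, 23, ... so naive_a(6) == 23, which can
# be compared against solution(6) when testing changes to the jump logic.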
if __name__ == "__main__":
print(F'''{solution() = }''')
| 224
| 0
|
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Treats the curve as a collection of linear segments and sums the area of
    the trapezium shapes they form with the x axis.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
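
    # --- Added check (not part of the original script): compare against the
    # exact integral of f on [0, 2], which is 2**4 / 4 + 2**3 / 3.
    exact_area = 2**4 / 4 + 2**3 / 3
    approx_area = trapezoidal_area(f, 0, 2, 100_000)
    print(f"exact area on [0, 2]: {exact_area}, trapezoidal estimate: {approx_area}")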
| 517
|
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Applies the rules of the game to every point and returns the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
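
# --- Added sketch (not part of the original script): a headless driver that
# advances the board a few generations without the matplotlib loop.
def run_headless(size: int = 5, generations: int = 3) -> list[list[bool]]:
    board = create_canvas(size)
    seed(board)
    for _ in range(generations):
        board = run(board)
    return board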
| 149
| 0
|
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
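# --- Added usage sketch (not part of the generated file): after the builder
# calls above, ModelProto is defined in this module's globals, so a serialized
# sentencepiece model can be parsed with the standard protobuf API, e.g.:
#
#   model = ModelProto()
#   with open("spiece.model", "rb") as f:
#       model.ParseFromString(f.read())
#   print(model.trainer_spec.vocab_size)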
| 709
|
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that will dynamically pad the inputs received."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
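
# --- Added usage note (not part of the original script); the script file name
# below is an assumption, but the flags follow the argument dataclasses above:
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-out \
#       --do_train --do_eval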
| 327
| 0
|
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector: sum(x * x for x in vector)."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier with a linear or rbf kernel."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel is used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * norm_squared(vector1 - vector2))."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
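
# --- Added usage sketch (not from the original file): fitting the SVC above on
# a tiny linearly separable problem; labels must be +1/-1 for the dual to work.
def _demo() -> int:
    observations = [
        np.asarray([0.0, 1.0]),
        np.asarray([0.0, 2.0]),
        np.asarray([3.0, 1.0]),
        np.asarray([3.0, 2.0]),
    ]
    classes = np.asarray([1, 1, -1, -1])
    svc = SVC(kernel="linear", regularization=10.0)
    svc.fit(observations, classes)
    return svc.predict(np.asarray([0.0, 1.5]))  # expected: 1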
| 182
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 253
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP-style image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so its shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. 1/255."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
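
# --- Added usage sketch (not part of the original module; the class name
# follows the reconstruction above and the expected shape is an assumption):
def _demo_preprocess():
    image = np.zeros((256, 256, 3), dtype=np.uint8)
    processor = CLIPImageProcessor()
    batch = processor(images=image, return_tensors="np")
    return batch["pixel_values"].shape  # expected: (1, 3, 224, 224)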
| 67
|
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 67
| 1
|
import os


def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum from the top left to the bottom right of the
    matrix in filename, only moving right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
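
# --- Added sketch (not part of the original solution): the same DP on an
# in-memory grid; per Project Euler's published 5x5 example the minimal path
# sum is 2427.
def min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid)
    dp = [[0] * n for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]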
| 654
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif  those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            sentence = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(sentence)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(sentence)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123

            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 654
| 1
|
def solution(limit: int = 50_000_000) -> int:
    """
    Return how many integers below limit can be written as the sum of a prime
    square, a prime cube, and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f'''{solution() = }''')
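    # --- Added check (not part of the original solution): per the problem
    # statement exactly four numbers below fifty qualify, the smallest being
    # 28 = 2**2 + 2**3 + 2**4.
    assert solution(50) == 4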
| 373
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
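
# Example (illustrative, not part of the original file): with the default conv
# strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the feature encoder
# downsamples the waveform by 5 * 2**6 = 320 input samples per output frame:
#
#     config = SEWConfig()
#     assert config.inputs_to_logits_ratio == 320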
| 373
| 1
|
"""simple docstring"""
def lowerCamelCase (a_ :List[Any]) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
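
# Note (illustrative): the bitwise test agrees with `% 2` for negative numbers
# as well, since the lowest bit of a two's-complement integer is 0 iff the
# number is even:
#
#     assert is_even(-4) and not is_even(-3)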
| 677
|
"""Generic utilities: tensor type checks, ModelOutput, and framework helpers."""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Test whether `x` is a torch, TF, JAX or numpy tensor (or a torch fx proxy)."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Test whether `x` is a numpy array."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Test whether `x` is a torch tensor; safe to call even when torch is absent."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert tensors, arrays and nested containers to plain Python lists/scalars."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert tensors and nested containers to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
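
# Example (illustrative): both converters recurse through containers, e.g.
#
#     to_py_obj({"logits": np.array([1, 2])})  # -> {"logits": [1, 2]}
#     to_numpy([[1, 2], [3, 4]]).shape         # -> (2, 2)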
class ModelOutput(OrderedDict):
    """
    Base class for model outputs; behaves both like a dict and like a tuple.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the non-``None`` attributes."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
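
# Example (illustrative): enter several context managers at once, e.g.
#
#     with ContextManagers([torch.no_grad(), open("log.txt", "w")]):
#         ...
#
# Both are exited in reverse order when the block ends.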
def can_return_loss(model_class):
    """Check whether a model class can return loss (i.e. takes `return_loss=True`)."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict with delimiter-joined keys."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
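
# Example (illustrative):
#
#     flatten_dict({"a": {"b": 1, "c": {"d": 2}}})
#     # -> {"a.b": 1, "a.c.d": 2}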
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose`."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model class without using isinstance(), because we cannot guarantee that the
    relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
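
# Example (illustrative): the check walks the MRO, so subclasses resolve too:
#
#     from transformers import BertModel
#     infer_framework(BertModel)  # -> "pt"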
| 168
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the attribute path (e.g. "encoder.layers.0.self_attn") down to the leaf module.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
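
# Example invocation (illustrative; the script name and all paths are
# placeholders, not taken from the original file):
#
#     python convert_wav2vec2_conformer.py \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --pytorch_dump_folder_path ./wav2vec2-conformer \
#         --dict_path /path/to/dict.ltr.txt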
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
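
# Note (illustrative): `_LazyModule` registers the import structure above and
# defers the heavy framework imports until an attribute is first resolved, so
#
#     from transformers.models.bert import BertConfig
#
# does not import torch, while accessing `BertModel` triggers the torch branch.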
| 366
| 1
|
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """
    Create a simple DataLoader to use during the test cases.
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """
    A helper function to verify the batch sizes coming from a prepared dataloader in each process.
    """
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
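
# Background note (illustrative): with `even_batches=True` (the default),
# Accelerate pads the final batches so all ranks see the same batch count;
# e.g. 3 samples on 2 processes yield sizes [1, 1] on both ranks, whereas
# `even_batches=False` gives rank 0 sizes [1, 1] and rank 1 just [1].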
| 488
|
"""Tests for the Kandinsky inpainting pipeline."""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
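
    # Note (illustrative, not from the original file): the UNet's 9 input
    # channels above follow the usual latent-inpainting layout, assumed to be
    # 4 image latents + 4 masked-image latents + 1 mask channel.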
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 527
| 0
|
"""Tests for the Flax Stable Diffusion pipeline."""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_lowerCAmelCase : List[str] = jax.device_count()
_lowerCAmelCase : List[str] = num_samples * [prompt]
_lowerCAmelCase : List[str] = jax.random.split(jax.random.PRNGKey(0 ) , snake_case__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=snake_case__ , )
_lowerCAmelCase : str = replicate(snake_case__ )
_lowerCAmelCase : int = pipeline.prepare_inputs(snake_case__ )
_lowerCAmelCase : Any = shard(snake_case__ )
_lowerCAmelCase : List[str] = pipeline(snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_lowerCAmelCase : Optional[Any] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_lowerCAmelCase , _lowerCAmelCase : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=snake_case__ , use_memory_efficient_attention=snake_case__ , )
_lowerCAmelCase : List[str] = replicate(snake_case__ )
_lowerCAmelCase : Optional[Any] = pipeline.prepare_inputs(snake_case__ )
_lowerCAmelCase : List[Any] = shard(snake_case__ )
_lowerCAmelCase : Optional[Any] = pipeline(snake_case__ , snake_case__ , snake_case__ , jit=snake_case__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        _lowerCAmelCase : Tuple = images_eff[2, 0, 256, 10:17, 1]  # slice the memory-efficient output, not the baseline
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
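

# Illustrative sketch of the pmap data layout the tests above rely on. The
# helper name `run_sharded` is introduced here for illustration only:
# `replicate` copies the params to every device, `jax.random.split` hands each
# device its own RNG, and `shard` reshapes inputs to
# (num_devices, batch // num_devices, ...) so the jitted pipeline call maps
# one slice per device.
def run_sharded(pipeline, params, prompt_ids, seed=0):
    import jax
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    num_devices = jax.device_count()
    params = replicate(params)
    rng = jax.random.split(jax.random.PRNGKey(seed), num_devices)
    prompt_ids = shard(prompt_ids)
    return pipeline(prompt_ids, params, rng, jit=True).images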
'''Project Euler 92: square-digit chains. Every starting number eventually
reaches 1 or 89; count the starters below ten million that reach 89.'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Speed is increased slightly by looking up five digits at a time.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared


# There are two chains: one ends with 89, and seeding its member 58 first needs
# the fewest iterations for all members to be checked; the other ends with 1 and
# has only the single element 1. So 58 and 1 are declared at the start.
# A dictionary was changed to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    """Return True if ``number``'s chain ends in 1 (False for 89), memoizing
    the answer for the number and its power-of-ten multiples."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain


def solution(number: int = 1_0_0_0_0_0_0_0) -> int:
    """Count the starting values below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
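

# Worked example of the two seeded chains (facts of the problem, not assumptions):
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89   (cycles at 89)
#   44 -> 32 -> 13 -> 10 -> 1                                  (settles at 1)
assert next_number(85) == 8**2 + 5**2 == 89
assert next_number(44) == 4**2 + 4**2 == 32
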
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase_ : Tuple =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self.dummy_uncond_unet
UpperCamelCase_ : Optional[int] =PNDMScheduler()
UpperCamelCase_ : Any =PNDMPipeline(unet=_snake_case , scheduler=_snake_case )
pndm.to(_snake_case )
pndm.set_progress_bar_config(disable=_snake_case )
UpperCamelCase_ : Optional[int] =torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] =pndm(generator=_snake_case , num_inference_steps=20 , output_type='numpy' ).images
UpperCamelCase_ : Tuple =torch.manual_seed(0 )
UpperCamelCase_ : str =pndm(generator=_snake_case , num_inference_steps=20 , output_type='numpy' , return_dict=_snake_case )[0]
UpperCamelCase_ : Tuple =image[0, -3:, -3:, -1]
UpperCamelCase_ : Tuple =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase_ : List[Any] =np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] ='google/ddpm-cifar10-32'
UpperCamelCase_ : Any =UNetaDModel.from_pretrained(_snake_case )
UpperCamelCase_ : List[str] =PNDMScheduler()
UpperCamelCase_ : Dict =PNDMPipeline(unet=_snake_case , scheduler=_snake_case )
pndm.to(_snake_case )
pndm.set_progress_bar_config(disable=_snake_case )
UpperCamelCase_ : List[Any] =torch.manual_seed(0 )
UpperCamelCase_ : Tuple =pndm(generator=_snake_case , output_type='numpy' ).images
UpperCamelCase_ : Tuple =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase_ : int =np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the MaskFormer Swin backbone."""

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer from the model's first MLP block."""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
    # We keep the constants inside the init function and the model loading inside the setUp function.
    # We need to test on relatively large models (i.e. >1b parameters), otherwise the quantization may not work as expected.
    # Therefore here we use only bloom-1b7 to test our module
_lowerCamelCase : Any = """bigscience/bloom-1b7"""
# Constant values
_lowerCamelCase : Optional[int] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
_lowerCamelCase : str = """Hello my name is"""
_lowerCamelCase : List[Any] = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
_lowerCamelCase : List[Any] = 10
def lowercase ( self : Dict ):
# Models and tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : str ):
super().setUp()
# Models and tokenizer
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
def lowercase ( self : Any ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Tuple ):
_UpperCAmelCase = self.model_abit.config
self.assertTrue(hasattr(snake_case_ , "quantization_config" ) )
_UpperCAmelCase = config.to_dict()
_UpperCAmelCase = config.to_diff_dict()
_UpperCAmelCase = config.to_json_string()
def lowercase ( self : Optional[Any] ):
from bitsandbytes.nn import Paramsabit
_UpperCAmelCase = self.model_fpaa.get_memory_footprint()
_UpperCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
_UpperCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowercase ( self : Tuple ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(snake_case_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_UpperCAmelCase = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )
def lowercase ( self : Tuple ):
_UpperCAmelCase = BitsAndBytesConfig()
_UpperCAmelCase = True
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_UpperCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )
def lowercase ( self : List[str] ):
with self.assertRaises(snake_case_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase = BitsAndBytesConfig()
with self.assertRaises(snake_case_ ):
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=snake_case_ , load_in_abit=snake_case_ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def lowercase ( self : List[Any] ):
with self.assertRaises(snake_case_ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(snake_case_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(snake_case_ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(snake_case_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(snake_case_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
_UpperCAmelCase = self.model_fpaa.to(torch.floataa )
_UpperCAmelCase = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
_UpperCAmelCase = self.model_fpaa.to("cpu" )
# Check this does not throw an error
_UpperCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
_UpperCAmelCase = self.model_fpaa.float()
def lowercase ( self : str ):
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=snake_case_ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
@classmethod
def lowercase ( cls : List[Any] ):
_UpperCAmelCase = "t5-small"
_UpperCAmelCase = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
_UpperCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
_UpperCAmelCase = "Translate in German: Hello, my dog is cute"
def lowercase ( self : Dict ):
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Any ):
from transformers import TaForConditionalGeneration
_UpperCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
_UpperCAmelCase = None
# test with `t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
# test with `flan-t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
_UpperCAmelCase = modules
def lowercase ( self : Any ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
# test with `flan-t5-small`
_UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=snake_case_ , device_map="auto" )
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
_UpperCAmelCase = model.generate(**snake_case_ )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : List[str] ):
super().setUp()
# model_name
_UpperCAmelCase = "bigscience/bloom-560m"
_UpperCAmelCase = "t5-small"
# Different types of model
_UpperCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# Sequence classification model
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# CausalLM model
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
# Seq2seq model
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=snake_case_ , device_map="auto" )
def lowercase ( self : Dict ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Tuple ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Union[str, Any] ):
super().setUp()
def lowercase ( self : Optional[int] ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_UpperCAmelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Tuple ):
super().setUp()
def lowercase ( self : List[Any] ):
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=snake_case_ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
_UpperCAmelCase = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
_UpperCAmelCase = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=snake_case_ ) , self.EXPECTED_OUTPUTS )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : Dict ):
_UpperCAmelCase = "facebook/opt-350m"
super().setUp()
def lowercase ( self : Dict ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
_UpperCAmelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_UpperCAmelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(snake_case_ ) ):
_UpperCAmelCase = LoRALayer(module.q_proj , rank=1_6 )
_UpperCAmelCase = LoRALayer(module.k_proj , rank=1_6 )
_UpperCAmelCase = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
_UpperCAmelCase = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_UpperCAmelCase = model.forward(**snake_case_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(snake_case_ , snake_case_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(snake_case_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : int = """gpt2-xl"""
_lowerCamelCase : str = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
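

# Recap sketch of the adapter pattern exercised above: freeze the quantized
# base model, wrap the attention projections in LoRALayer, and train only the
# low-rank weights (rank 16, as in the test). `_attach_lora_adapters` is a
# helper name introduced here for illustration only.
def _attach_lora_adapters(model, rank=16):
    for _, module in model.named_modules():
        if "OPTAttention" in repr(type(module)):
            module.q_proj = LoRALayer(module.q_proj, rank=rank)
            module.k_proj = LoRALayer(module.k_proj, rank=rank)
            module.v_proj = LoRALayer(module.v_proj, rank=rank)
    return model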
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swiftformer'''] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''Dijkstra's algorithm on a binary grid: cells holding 1 are traversable and
every move costs one unit.'''
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
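

# Minimal usage sketch: on a 3x3 grid of ones (1 = traversable), the cheapest
# 4-connected route from (0, 0) to (2, 2) takes four unit steps.
assert dijkstra(np.ones((3, 3), dtype=int), (0, 0), (2, 2), allow_diagonal=False)[0] == 4
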
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''', ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''', ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is closed in the right order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq checkpoint and normalise its state dict for OPT."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has the QKV weight separated in K, V, Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the checkpoint's weights into our OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
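

# Standalone illustration of the fused-QKV split above (dummy shapes, no
# checkpoint needed; `_demo_qkv_split` is a name introduced here, not part of
# the conversion script):
def _demo_qkv_split():
    fused = torch.arange(12, dtype=torch.float32).reshape(6, 2)  # depth = 6, model dim 2
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)  # metaseq stores K, V, Q
    assert k.shape == v.shape == q.shape == (2, 2)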
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
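    # Usage note: with the _LazyModule installed in sys.modules, submodule
    # attributes resolve lazily, e.g.
    #     import transformers.models.xlm_roberta_xl as mod  # cheap; no torch import yet
    #     mod.XLMRobertaXLModel  # first attribute access triggers the real import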
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
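            # Funnel marks the <cls> position with token type id 2 (unlike BERT's 0),
            # so a single sentence yields [2, 0, ...] and a pair appends a block of 1s.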
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase : str = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase : Dict = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = []
lowerCamelCase = len(snake_case__ )
for i in range(snake_case__ ):
lowerCamelCase = -1
for j in range(i + 1 , snake_case__ ):
if arr[i] < arr[j]:
lowerCamelCase = arr[j]
break
result.append(snake_case__ )
return result
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = []
for i, outer in enumerate(snake_case__ ):
lowerCamelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowerCamelCase = inner
break
result.append(snake_case__ )
return result
def a__ ( snake_case__ ) -> list[float]:
lowerCamelCase = len(snake_case__ )
lowerCamelCase = []
lowerCamelCase = [-1] * arr_size
for index in reversed(range(snake_case__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowerCamelCase = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase : Dict = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
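
    # Worked trace of the stack variant on [2, 1, 3], scanning right to left:
    # 3 sees an empty stack (-1); 1 sees [3] on the stack (3); 2 pops the
    # smaller 1 and then sees 3. Cheap sanity check:
    assert next_greatest_element([2, 1, 3]) == [3, 3, -1]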
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
A__ : Tuple = TypeVar('T')
class __magic_name__ ( Generic[T] ):
def __init__( self , A_ , A_ ) -> None:
"""simple docstring"""
_lowercase: Any | T = None
_lowercase: int = len(A_ )
_lowercase: list[T] = [any_type for _ in range(self.N )] + arr
_lowercase: Tuple = fnc
self.build()
def lowercase_ ( self ) -> None:
"""simple docstring"""
for p in range(self.N - 1 , 0 , -1 ):
_lowercase: int = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase_ ( self , A_ , A_ ) -> None:
"""simple docstring"""
p += self.N
_lowercase: str = v
while p > 1:
_lowercase: Dict = p // 2
_lowercase: Union[str, Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase_ ( self , A_ , A_ ) -> T | None: # noqa: E741
"""simple docstring"""
_lowercase , _lowercase: List[Any] = l + self.N, r + self.N
_lowercase: T | None = None
while l <= r:
if l % 2 == 1:
_lowercase: Dict = self.st[l] if res is None else self.fn(A_ , self.st[l] )
if r % 2 == 0:
_lowercase: Dict = self.st[r] if res is None else self.fn(A_ , self.st[r] )
_lowercase , _lowercase: Any = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
A__ : List[Any] = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
A__ : str = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
A__ : int = SegmentTree(test_array, min)
A__ : Tuple = SegmentTree(test_array, max)
A__ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def _lowerCAmelCase ( ):
"""simple docstring"""
for i in range(len(_UpperCamelCase ) ):
for j in range(_UpperCamelCase , len(_UpperCamelCase ) ):
_lowercase: str = reduce(_UpperCamelCase , test_array[i : j + 1] )
_lowercase: Dict = reduce(_UpperCamelCase , test_array[i : j + 1] )
_lowercase: int = reduce(lambda _UpperCamelCase , _UpperCamelCase : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(_UpperCamelCase , _UpperCamelCase )
assert max_range == max_segment_tree.query(_UpperCamelCase , _UpperCamelCase )
assert sum_range == sum_segment_tree.query(_UpperCamelCase , _UpperCamelCase )
test_all_segments()
for index, value in test_updates.items():
A__ : List[Any] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
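
    # The iterative query walks l and r up from the leaf layer (offset by N),
    # folding only the nodes that fully cover the range. After the updates
    # above, the minimum over indices 1..6 (inclusive) is -14:
    assert min_segment_tree.query(1, 6) == -14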
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A__ : int = threading.Lock()
A__ : Optional[logging.Handler] = None
A__ : str = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
A__ : str = logging.WARNING
A__ : Union[str, Any] = True
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[Any] = os.getenv('''TRANSFORMERS_VERBOSITY''' , _UpperCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def _lowerCAmelCase ( ):
"""simple docstring"""
return __name__.split('''.''' )[0]
def _lowerCAmelCase ( ):
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def _lowerCAmelCase ( ):
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_lowercase: int = logging.StreamHandler() # Set sys.stderr as stream.
_lowercase: Dict = sys.stderr.flush
# Apply our default configuration to the library root logger.
_lowercase: Dict = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_lowercase: Optional[Any] = False
def _lowerCAmelCase ( ):
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_lowercase: Tuple = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_lowercase: Dict = None
def _lowerCAmelCase ( ):
"""simple docstring"""
return log_levels
def _lowerCAmelCase ( _UpperCamelCase = None ):
"""simple docstring"""
if name is None:
_lowercase: Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowercase: str = False
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowercase: List[str] = True
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Any = _get_library_root_logger().handlers
for handler in handlers:
_lowercase: int = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[str] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_UpperCamelCase )
def _lowerCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
_lowercase: Any = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , _UpperCamelCase )
if no_advisory_warnings:
return
self.warning(*_UpperCamelCase , **_UpperCamelCase )
A__ : Optional[int] = warning_advice
@functools.lru_cache(_UpperCamelCase )
def _lowerCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
self.warning(*_UpperCamelCase , **_UpperCamelCase )
A__ : List[Any] = warning_once
class __magic_name__ :
def __init__( self , *A_ , **A_ ) -> Any: # pylint: disable=unused-argument
"""simple docstring"""
_lowercase: Tuple = args[0] if args else None
def __iter__( self ) -> Union[str, Any]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , A_ ) -> List[Any]:
"""simple docstring"""
def empty_fn(*A_ , **A_ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
return
class __magic_name__ :
def __call__( self , *A_ , **A_ ) -> Dict:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*A_ , **A_ )
else:
return EmptyTqdm(*A_ , **A_ )
def lowercase_ ( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
_lowercase: Optional[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*A_ , **A_ )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A__ : str = _tqdm_cls()
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
_lowercase: str = True
hf_hub_utils.enable_progress_bars()
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
_lowercase: Union[str, Any] = False
hf_hub_utils.disable_progress_bars()
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return ``number``'s multiplication table up to ``number_of_terms``, one line per product."""
    return "\n".join(
        f"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap keyed by node value, with an index map so that
    decrease_key can locate any node in O(1)."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node('''R''', -1)
b = Node('''B''', 6)
a = Node('''A''', 3)
x = Node('''X''', 1)
e = Node('''E''', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
    print(i)

print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
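
# After decrease_key, B(-17) should have bubbled past R(-1) to the root:
assert my_min_heap.peek().val == -17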
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the roberta_prelayernorm checkpoint into the transformers structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=['''RobertaPreLayerNormForMaskedLM'''] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename='''pytorch_model.bin''' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('''roberta.''' ):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to ``precision`` digits with the Chudnovsky series, which
    gains roughly 14 digits per term."""
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(F"The first {n} digits of pi is: {pi(n)}")
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
A__ = logging.get_logger(__name__)
A__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
A__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
A__ = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>",
                 add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 710
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
"""simple docstring"""
if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
# Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=__lowerCAmelCase , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=__lowerCAmelCase , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options run inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=__lowerCAmelCase , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
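# Hedged illustration (hypothetical argument values, not from the source): with
#   --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hello"
# the launcher above assembles and runs, roughly:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#     --command "cd /usr/share; echo hello" --worker all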
| 219
| 0
|
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Copy the fairseq checkpoint's weights into the transformers design and verify the outputs match."""
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our RoBERTa config:''' , config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads['''mnli'''](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 103
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
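# Hedged usage sketch (standard PretrainedConfig workflow; the overrides below
# are illustrative, not required by the class):
#   config = BertGenerationConfig(hidden_size=768, num_hidden_layers=12)
#   config.model_type        -> "bert-generation"
#   config.to_json_string()  -> JSON round-trip like any PretrainedConfig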
| 103
| 1
|
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place, locating each insertion point by binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the sorted prefix right and drop `val` into its slot
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
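    # Hedged property check of the sort above against the built-in `sorted`
    # (import and names are ours, for illustration only).
    import random

    for _ in range(100):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
        assert binary_insertion_sort(list(data)) == sorted(data)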
| 706
|
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(m):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            """unet""": unet,
            """controlnet""": controlnet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((64, 64) )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """image""": image,
            """control_image""": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["""num_inference_steps"""] = steps
        inputs["""guidance_scale"""] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        prompt = """evil space-punk bird"""
        control_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
        image = load_image(
            """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
        assert np.abs(expected_image - image ).max() < 9E-2
| 240
| 0
|
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric ):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
], )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def __lowerCAmelCase ( self, _a, _a, _a=None, _a="uniform_average", _a=True ) -> Tuple:
__SCREAMING_SNAKE_CASE = mean_squared_error(
_lowercase, _lowercase, sample_weight=_lowercase, multioutput=_lowercase, squared=_lowercase )
return {"mse": mse}
| 693
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    '''Compute the resized (height, width), optionally keeping aspect ratio and snapping to a multiple.'''
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
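# Worked example (hedged, values chosen for illustration): for a 480x640 input,
# output_size=(384, 384), keep_aspect_ratio=True and multiple=32:
#   scale_height = 384/480 = 0.8, scale_width = 384/640 = 0.6
#   |1 - 0.6| > |1 - 0.8|, so we "fit height" and set scale_width = 0.8
#   new size = (round(384/32)*32, round(512/32)*32) = (384, 512)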
class DPTImageProcessor(BaseImageProcessor ):
    model_input_names = ['''pixel_values''']

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 384, '''width''': 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 35
| 0
|
"""simple docstring"""
def decimal_to_fraction(decimal):
    """Convert a decimal (or numeric string) to a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('''Please enter a valid number''' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('''.''' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        # Euclid's algorithm to find the gcd, then reduce the fraction by it
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 720
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    '''Output of the transformation model, carrying the projected and hidden states.'''
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig ):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config ):
        super().__init__(config )
        self.base_model = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size, config.project_dim )
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps )
        self.post_init()
    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output2 = outputs['''hidden_states'''][-2]
            sequence_output2 = self.pre_LN(sequence_output2 )
            projection_state2 = self.transformation_pre(sequence_output2 )
            return TransformationModelOutput(
                projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 215
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_lowerCAmelCase : int = {"facebook/blenderbot_small-90M": 5_12}
def get_pairs(word):
    '''Return the set of adjacent symbol pairs in a word (a tuple of symbols).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
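# Hedged illustration: get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the candidate merges
# that the BPE loop below repeatedly ranks and applies.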
class BlenderbotSmallTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs, ):
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges = merges_handle.read().split("\n" )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder )

    def get_vocab(self) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token: str ) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])" , R" \1" , token )
        token = re.sub("(')" , R" \1 " , token )
        token = re.sub(R"\s{2,}" , " " , token )
        if "\n" in token:
            token = token.replace("\n" , " __newln__" )
        tokens = token.split(" " )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = '''@@ '''.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize(self, text: str ) -> List[str]:
        split_tokens = []
        words = re.findall(R"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens

    def _convert_token_to_id(self, token: str ) -> int:
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index: int ) -> str:
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string(self, tokens: List[str] ) -> str:
        out_string = ''' '''.join(tokens ).replace("@@ " , "" ).strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
| 289
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 590
| 0
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a, precision=10**-10 ):
    """Find a root of `func` (an expression string in x) starting from the guess `a`."""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find value of e (root of log(x) - 1)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
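# Hedged alternative sketch: the eval/sympy version above is fragile, so the
# same Newton iteration with explicit callables avoids eval entirely (these
# names are ours, not part of the original module).
def newton_raphson_callable(f, df, x0: float, precision: float = 1e-10) -> float:
    x = x0
    while abs(f(x)) >= precision:
        x = x - f(x) / df(x)  # standard Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n)
    return x

print(newton_raphson_callable(lambda x: x**2 - 5, lambda x: 2 * x, 2))  # ~ sqrt(5)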
| 704
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool ):
    '''Tool that transcribes audio to text with Whisper.'''
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]

    def encode(self, audio ):
        return self.pre_processor(audio , return_tensors='pt' ).input_features

    def forward(self, inputs ):
        return self.model.generate(inputs=inputs )

    def decode(self, outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
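# Hedged usage sketch (tools in the transformers agents API are callable; the
# audio path below is illustrative, not from the source):
#   tool = SpeechToTextTool()
#   text = tool("path/to/audio.wav")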
| 695
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''Find a root of `function` in [a, b] by repeated interval halving.'''
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval." )
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
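    # Hedged extra example: sqrt(5) as the positive root of x**2 - 5 on [1, 3]
    # (interval endpoints have opposite signs, so bisection applies).
    print(bisection(lambda x: x**2 - 5, 1, 3))  # ~ 2.2360679...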
| 283
|
def and_gate(input_1: int, input_2: int) -> int:
    '''Logical AND: returns 1 only when both inputs are 1.'''
    return int((input_1, input_2).count(0 ) == 0 )


def test_and_gate() -> None:
    '''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
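    # Hedged illustration: the full truth table via itertools (the import is
    # ours, added for this example).
    from itertools import product

    for a, b in product((0, 1), repeat=2):
        print(a, b, "->", and_gate(a, b))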
| 283
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig ):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
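# Illustrative sketch of how the two classes interact (assumes an installed
# transformers tree, since the relative imports above do not run standalone):
# config = GPTJConfig(n_layer=2, n_head=4, n_embd=128)
# onnx_config = GPTJOnnxConfig(config, use_past=True)
# onnx_config.num_layers, onnx_config.num_attention_heads  # -> 2, 4
# list(onnx_config.inputs)  # input_ids, past_key_values.*, attention_mask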
| 334
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
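# Illustrative note on the _LazyModule pattern above: replacing the module in
# sys.modules defers the heavy torch/flax imports until an attribute is first
# accessed, e.g.
#   from transformers.models.gpt_neo import GPTNeoConfig  # cheap, config only
#   from transformers.models.gpt_neo import GPTNeoModel   # triggers the torch import
# If torch is unavailable, the torch-backed names are simply never registered.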
| 334
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the monitored validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
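# A wiring sketch (assumptions: `model` is the surrounding project's
# LightningModule and `args.output_dir` comes from its argument parser):
# es = get_early_stopping_callback(metric="em", patience=3)
# ckpt = get_checkpoint_callback(output_dir=args.output_dir, metric="em")
# trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), es, ckpt])
# trainer.fit(model)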
| 451
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
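# Illustrative: `attribute_map` lets generic code read standard names off this
# config, e.g.
# config = DistilBertConfig(dim=256, n_heads=4)
# config.hidden_size          # -> 256, aliased to `dim`
# config.num_attention_heads  # -> 4, aliased to `n_heads`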
| 172
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 571
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
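# What the last test exercises, in brief (a sketch from memory, not the exact
# implementation): rotary embeddings rotate each pair of feature dimensions by a
# position-dependent angle, roughly
#   q_rot = q * cos(pos) + rotate_half(q) * sin(pos)
# so that query/key dot products depend only on relative positions.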
| 571
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
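# A translation-preparation sketch (illustrative; `from_pretrained` downloads the
# checkpoint named in the maps above):
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# batch = tokenizer("Hello world", return_tensors="pt")
# # input_ids start with the __en__ language token and end with </s>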
| 17
|
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes via BFS."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between two nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1

    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
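# Both functions visit each vertex and edge at most once, so they run in O(V + E).
# One more illustrative query on the same demo graph:
if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, 'A', 'F'))  # returns ['A', 'C', 'F']
    print(bfs_shortest_path_distance(demo_graph, 'A', 'F'))  # returns 2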
| 487
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
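# Illustrative: the adapter fields are what distinguish X-MOD from a plain
# RoBERTa-style config.
# config = XmodConfig(languages=("en_XX", "de_DE"), adapter_reduction_factor=2)
# config.default_language  # -> None until a task sets it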
| 646
| 0
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
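# Illustrative usage sketch (not part of the original utility): how the recursive
# replacement above is typically driven. The tiny model and the
# `BnbQuantizationConfig(load_in_8bit=True)` call are assumptions for the example,
# not the canonical entry point.
#
#   from accelerate.utils import BnbQuantizationConfig
#   import torch.nn as nn
#
#   model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
#   config = BnbQuantizationConfig(load_in_8bit=True)
#   model, replaced = _replace_with_bnb_layers(model, config, modules_to_not_convert=[])
#   assert replaced  # every eligible nn.Linear is now a bnb.nn.Linear8bitLt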
"""Gradient descent for minimizing the cost of a linear hypothesis function."""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Error (hypothesis minus actual output) for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: bias term plus the weighted sum of the features."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
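# Worked example (added for clarity, not in the original): with the initial
# parameter_vector [2, 4, 1, 5], the hypothesis for the first training input
# (5, 2, 3) is 2 + 4*5 + 1*2 + 5*3 = 39, so its initial training error is
# 39 - 15 = 24. Gradient descent shrinks these errors until two successive
# parameter vectors agree within the tolerances set in run_gradient_descent().
#
#   >>> _hypothesis_value((5, 2, 3))
#   39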
"""Utility to prepare pre- and post-release version bumps across the repository."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links from the main doc to the stable doc in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
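# Example invocations (added for reference; the flags come straight from the
# argparse setup above, while the `utils/release.py` path is an assumption based
# on where diffusers keeps this script — adjust to your checkout):
#
#   python utils/release.py                 # pre-release: bump the minor version everywhere
#   python utils/release.py --patch         # pre-release: bump only the micro (patch) version
#   python utils/release.py --post_release  # post-release: move back to a .dev0 version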
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module):
    """Disable gradients for all parameters of a module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
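# Minimal usage sketch (added for illustration): freezing a layer before
# fine-tuning so the optimizer only updates the remaining parameters.
#
#   import torch.nn as nn
#
#   layer = nn.Linear(8, 8)
#   freeze_module(layer)
#   assert all(not p.requires_grad for p in layer.parameters())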
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
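# Example invocation (added for reference; every flag maps onto a dataclass field
# defined above via HfArgumentParser — the checkpoint name and file paths are
# placeholders):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file path/to/train.txt \
#       --train_ref_file path/to/train_ref.txt \
#       --output_dir path/to/output \
#       --do_train --do_eval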
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker run by each process: repeatedly exchange values with neighbors."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
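# Illustrative trace (added for clarity): odd-even transposition sort on [3, 1, 2].
# Even phase: compare/swap pair (0, 1) -> [1, 3, 2]; odd phase: pair (1, 2) ->
# [1, 2, 3]. For n elements, n phases are guaranteed to sort, which is why each
# process above performs a fixed number of exchange rounds rather than checking
# for early termination. (The loop uses 10 rounds because the demo list in
# main() has 10 elements.)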
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # Target attribute paths follow the diffusers T5FilmDecoder module definition.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
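# Example invocation (added for reference; the checkpoint directory and output
# path are placeholders — any T5X-format music-spectrogram-diffusion checkpoint
# works, and the script name follows the diffusers repository convention):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion_pipeline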
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
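# Worked example (added for clarity): for hydrogen (M1 = 2.016 g/mol) and
# helium (M2 = 4.002 g/mol), Graham's law gives rate1/rate2 = sqrt(M2/M1), so
# effusion_ratio(2.016, 4.002) returns roughly 1.408943 — hydrogen effuses
# about 1.41 times faster than helium, since the lighter gas moves faster at
# the same temperature.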
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
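

# Hedged usage sketch: `attribute_map` (as reconstructed above) routes generic
# config attribute names onto GPT-J-specific ones, so both spellings stay in sync.
#
#     from transformers import GPTJConfig
#     config = GPTJConfig()
#     assert config.max_position_embeddings == config.n_positions == 2048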
| 73
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
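

# Hedged sketch of the functional state flow above: the scheduler object itself is
# stateless and all mutable values travel through KarrasVeSchedulerState.
# Illustrative only; it assumes the reconstructed names and a diffusers version
# that still ships this Flax scheduler.
#
#     scheduler = FlaxKarrasVeScheduler()
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     # state.schedule now holds 50 sigma(t_i) values, largest first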
| 73
| 1
|
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
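    # Worked check of the recurrence (hand-computed, added for illustration):
    # with coins {1, 2, 5} there are 4 ways to make 5 pence ->
    # 5, 2+2+1, 2+1+1+1, 1+1+1+1+1
    assert solution(5) == 4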
| 569
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 569
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 712
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 347
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
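

# Hedged usage sketch (assumes a transformers install that ships MvpTokenizerFast;
# the exact token ids depend on the downloaded vocabulary):
#
#     from transformers import MvpTokenizerFast
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     tokenizer("Hello world")["input_ids"]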
| 218
|
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f'Please use 3 unique rotors (not {unique_rotsel})'
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f'First rotor position is not within range of 1..26 ({rotorpos1})'
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f'Second rotor position is not within range of 1..26 ({rotorpos2})'
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f'Third rotor position is not within range of 1..26 ({rotorpos3})'
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f'Odd number of symbols ({len(pbstring)})'
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(' ', '')

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f'Duplicate symbol ({i})'
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = '',
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return ''.join(result)
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print('Encrypted message:', en)
    print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 557
| 0
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__snake_case : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __lowercase):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 365
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]['file_name'])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info['repo_id'], model_info['file_name'])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head')
    model_args['hidden_size'] = model_args.pop('n_embd')
    model_args['num_layers'] = model_args.pop('n_layer')
    model_config = ConfigClass(**checkpoint['model_args'])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias')}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias')}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1)}M params, {round(val_loss , 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, 'cpu', model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters')
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape')
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('initial and new outputs are not equal')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, 'config.json'))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, 'config.json'))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, 'config.json'))
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz')
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz')
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig)
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config)
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
__snake_case : str = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
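
# Hedged CLI sketch matching the argparse above (the script name and output path
# are placeholders, not from the original file):
#
#     python convert_bark_checkpoint.py text ./bark-text-hf --is_small
#
# This downloads the small "text" checkpoint into the cache, converts it, checks
# the converted outputs against the original implementation, and saves the model.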
| 365
| 1
|
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08497,
            'b': 0.01492,
            'c': 0.02202,
            'd': 0.04253,
            'e': 0.11162,
            'f': 0.02228,
            'g': 0.02015,
            'h': 0.06094,
            'i': 0.07546,
            'j': 0.00153,
            'k': 0.01292,
            'l': 0.04025,
            'm': 0.02406,
            'n': 0.06749,
            'o': 0.07507,
            'p': 0.01929,
            'q': 0.00095,
            'r': 0.07587,
            's': 0.06327,
            't': 0.09356,
            'u': 0.02758,
            'v': 0.00978,
            'w': 0.02560,
            'x': 0.00150,
            'y': 0.01994,
            'z': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key, )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
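

# Hedged example: "khoor, zruog" is "hello, world" Caesar-shifted by 3. The
# chi-squared fit is statistical, so very short ciphertexts may not decode
# reliably; this demo is added for illustration and is not part of the original
# module.
if __name__ == "__main__":
    shift, chi_value, decoded = decrypt_caesar_with_chi_squared("khoor, zruog")
    print(shift, decoded)  # expected: 3 hello, world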
| 265
|
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, 'rb') as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x1F\x8B']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, 'rb') as gzip_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        B'PK\x03\x04',
        B'PK\x05\x06',  # empty archive
        B'PK\x07\x08',  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, 'rb') as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, 'r') as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\xFD\x37\x7A\x58\x5A\x00']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00']  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError('Please pip install rarfile')
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x28\xb5\x2F\xFD']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError('Please pip install zstandard')
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, 'rb') as ifh, open(output_path, 'wb') as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x42\x5A\x68']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x37\x7A\xBC\xAF\x27\x1C']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError('Please pip install py7zr')
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, 'r') as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [B'\x04\x22\x4D\x18']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError('Please pip install lz4')
        import lz4.frame

        with lz4.frame.open(input_path, 'rb') as compressed_file:
            with open(output_path, 'wb') as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class lowerCAmelCase__ :
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _lowerCAmelCase ( cls : List[str] ) -> Tuple:
"""simple docstring"""
return max(
len(_SCREAMING_SNAKE_CASE )
for extractor in cls.extractors.values()
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _lowerCAmelCase ( _SCREAMING_SNAKE_CASE : Union[Path, str] , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(_SCREAMING_SNAKE_CASE , magic_number_length=_SCREAMING_SNAKE_CASE )
except OSError:
return b""
@classmethod
def _lowerCAmelCase ( cls : Any , _SCREAMING_SNAKE_CASE : Union[Path, str] , _SCREAMING_SNAKE_CASE : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE : Optional[Any] = cls.infer_extractor_format(_SCREAMING_SNAKE_CASE )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        """simple docstring"""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        """simple docstring"""
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
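# Illustrative usage of the extractor registry above (a sketch, not part of the
# original module; the archive and output paths are hypothetical):
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")   # e.g. "gzip"
#   if fmt:
#       Extractor.extract("archive.tar.gz", "/tmp/extracted", extractor_format=fmt)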
| 265
| 1
|
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """simple docstring"""

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fpaa = True
    ecr_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
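# Illustratively (an assumption inferred from the type assertions below, not
# shown in the original source), `_convert_nargs_to_dict` is expected to parse
# `success_training_script_args` roughly as:
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-5, "max_steps": 50.5}
# while `fail_training_script_args` mixes bare flags with valued flags and
# should be rejected.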
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
    def test_args_convert(self) -> None:
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 553
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """simple docstring"""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(value):
    '''simple docstring'''
    try:
        int(value)
        return True
    except ValueError:
        return False


def can_convert_to_float(value):
    '''simple docstring'''
    try:
        float(value)
        return True
    except ValueError:
        return False
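# The CSV parsed below is expected to look roughly like this (an illustrative
# sketch inferred from the DictReader field names used in __init__; not part
# of the original file):
#
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,1277
#   bert-base-uncased,8,512,2455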
class Plot:
    """simple docstring"""

    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}"""
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += F""" {label_model_name} vs."""

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    '''simple docstring'''
    parser = HfArgumentParser(PlotArguments)
    args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=args)
    plot.plot()
if __name__ == "__main__":
main()
| 553
| 1
|
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        """simple docstring"""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3)

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
pass
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
pass
| 48
|
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    '''simple docstring'''
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
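# Illustrative behaviour (a sketch; the spacing follows the round-robin rule above):
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']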
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48
| 1
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = '''T5Config'''
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
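# Worked example (illustrative values, not from the original module): with
# pad_token_id=0 and decoder_start_token_id=2,
#   shift_tokens_right([[5, -100, 7]], 0, 2) -> [[2, 5, 0]]
# i.e. the sequence is shifted one position to the right, the decoder start
# token is prepended, and any -100 ignore-index is replaced by the pad token.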
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mt5"""
__SCREAMING_SNAKE_CASE = MTaConfig
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mt5"""
__SCREAMING_SNAKE_CASE = MTaConfig
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mt5"""
__SCREAMING_SNAKE_CASE = MTaConfig
| 680
|
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
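#
# As an illustrative sketch (the tool itself does this with itertools.product
# further below; the variation strings here are just the example values above):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   variations = [" ".join(c) for c in itertools.product(*dims)]  # 6 combinations
#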
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class Tee:
    """simple docstring"""

    def __init__(self, filename):
        """simple docstring"""
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        """simple docstring"""
        return getattr(self.stdout, attr)

    def write(self, msg):
        """simple docstring"""
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(F'''{key}={val}''')

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += F'''{cmd.pop(0)} '''
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += F''' --output_dir {output_dir}'''

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / F'''log.{prefix}.stdout.txt''', "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / F'''log.{prefix}.stderr.txt''', "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(F'''{output_dir}/all_results.json''', "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = F'''{id}: {variation:<{longest_variation_len}}'''
    outcome = F'''{preamble}: '''
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F'''\33[2K\r{outcome}'''
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = F'''{outcome} {mean_target}'''
        if len(results) > 1:
            results_str += F''' {tuple(round(x, 2) for x in results)}'''
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd", )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'", )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against", )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second", )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples", )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported", )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked", )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress", )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}.txt'''
    print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''')
    print(F'''and this script\'s output is also piped into {report_fn}''')

    sys.stdout = Tee(report_fn)

    print(F'''\n*** Running {len(variations)} benchmarks:''')
    print(F'''Base command: {" ".join(base_cmd)}''')

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ) )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 680
| 1
|
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DPTImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs)
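# Migration sketch (illustrative): since the deprecated class above is a thin
# alias that forwards all arguments, a call such as
#   DPTFeatureExtractor(do_resize=True)
# can simply become
#   DPTImageProcessor(do_resize=True)
# `do_resize` is just an example keyword; any arguments are forwarded unchanged.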
| 471
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['''state_dict''']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit('''.''', 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError('''Expected key `target` to instantiate.''')
    return get_obj_from_str(config['''target'''])(**config.get('''params''', {}))
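# A config accepted by instantiate_from_config looks roughly like this
# (illustrative; it mirrors the `target`/`params` keys read above, and the
# target string matches the VQModel import at the top of this file):
#   {"target": "taming.models.vqgan.VQModel", "params": {...}}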
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location='''cpu''')
        global_step = pl_sd['''global_step''']
        print(f"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd['''state_dict'''], gpu=gpu, eval_mode=eval_mode)['''model''']
    return model, global_step
| 471
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __A ( metaclass=A ):
'''simple docstring'''
__lowerCamelCase : str = ['torch', 'transformers', 'onnx']
def __init__(self , *A , **A ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Dict:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __A ( metaclass=A ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__(self , *A , **A ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __A ( metaclass=A ):
'''simple docstring'''
__lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__(self , *A , **A ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __A ( metaclass=A ):
'''simple docstring'''
__lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__(self , *A , **A ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __A ( metaclass=A ):
'''simple docstring'''
__lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__(self , *A , **A ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> int:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __A ( metaclass=A ):
'''simple docstring'''
__lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__(self , *A , **A ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def a__ (cls , *A , **A ) -> Any:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 710
|
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def a__ (self , A=0 ) -> str:
"""simple docstring"""
_a = floats_tensor((1, 3, 128, 128) , rng=random.Random(A ) )
_a = np.random.RandomState(A )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def a__ (self ) -> str:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=A )
_a = self.get_dummy_inputs()
_a = pipe(**A ).images
_a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
_a = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A )
pipe.set_progress_bar_config(disable=A )
_a = self.get_dummy_inputs()
_a = pipe(**A ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a__ (self ) -> int:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
# warmup pass to apply optimizations
_a = pipe(**self.get_dummy_inputs() )
_a = self.get_dummy_inputs()
_a = pipe(**A ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a__ (self ) -> Dict:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
_a = self.get_dummy_inputs()
_a = pipe(**A ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
_a = self.get_dummy_inputs()
_a = pipe(**A ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
_a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A )
_a = self.get_dummy_inputs()
_a = pipe(**A ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_a = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
@property
def a__ (self ) -> Tuple:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a__ (self ) -> Any:
"""simple docstring"""
_a = ort.SessionOptions()
_a = False
return options
def a__ (self ) -> str:
"""simple docstring"""
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_a = init_image.resize((768, 512) )
# using the PNDM scheduler by default
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
_a = '''A fantasy landscape, trending on artstation'''
_a = np.random.RandomState(0 )
_a = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type='''np''' , )
_a = output.images
_a = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_a = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
_a = init_image.resize((768, 512) )
_a = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
_a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
_a = '''A fantasy landscape, trending on artstation'''
_a = np.random.RandomState(0 )
_a = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type='''np''' , )
_a = output.images
_a = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
_a = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 352
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
lowerCAmelCase_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
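# Example invocation (illustrative; the script filename is hypothetical and the
# flag defaults above are used when omitted):
#   python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522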
| 60
| 1
|
class Node:
    '''simple docstring'''

    def __init__(self, data: int, previous=None, next_node=None):
        '''simple docstring'''
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        '''simple docstring'''
        return F"""{self.data}"""

    def get_data(self) -> int:
        '''simple docstring'''
        return self.data

    def get_next(self):
        '''simple docstring'''
        return self.next

    def get_previous(self):
        '''simple docstring'''
        return self.previous
class LinkedListIterator:
    '''simple docstring'''

    def __init__(self, head):
        '''simple docstring'''
        self.current = head

    def __iter__(self):
        '''simple docstring'''
        return self
    def __next__(self):
        '''simple docstring'''
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    '''simple docstring'''

    def __init__(self) -> None:
        '''simple docstring'''
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__(self) -> str:
        '''simple docstring'''
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        '''simple docstring'''
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
def __iter__( self : Dict ) -> Dict:
'''simple docstring'''
return LinkedListIterator(self.head )
    def get_head_data(self):
        '''simple docstring'''
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        '''simple docstring'''
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        '''simple docstring'''
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position(self, position: int, value: int) -> None:
        '''simple docstring'''
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)
    def get_node(self, item: int) -> Node:
        '''simple docstring'''
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""")

    def delete_value(self, value: int):
        '''simple docstring'''
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        '''simple docstring'''
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        '''simple docstring'''
        return self.head is None
def __lowercase ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
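# Illustrative usage of the doubly linked list above (a sketch, not original
# doctests; `LinkedList`, `insert`, and `delete_value` are the names used above):
#   linked_list = LinkedList()
#   for i in (1, 2, 3):
#       linked_list.insert(i)
#   str(linked_list)            # -> '1 2 3'
#   3 in linked_list            # -> True
#   linked_list.delete_value(2)
#   str(linked_list)            # -> '1 3'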
| 116
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Any ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : str ,**lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Tuple ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : int ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : str ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Any ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : str ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Any ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : List[Any] ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : str ,**lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Dict ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
| 116
| 1
|
'''simple docstring'''
def snake_case ( snake_case : int ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
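# Worked example for the check above: 28 is perfect because its proper divisors
# 1 + 2 + 4 + 7 + 14 sum back to 28, while 12 is not (1 + 2 + 3 + 4 + 6 = 16).
assert sum(i for i in range(1, 28 // 2 + 1) if 28 % i == 0) == 28
assert sum(i for i in range(1, 12 // 2 + 1) if 12 % i == 0) != 12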
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
_UpperCamelCase : int = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 284
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
_UpperCamelCase : Union[str, Any] = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8000,
"sample_size": 13_1072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
}
def snake_case ( snake_case : int , snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
    return torch.atan2(snake_case , snake_case ) / math.pi * 2
def snake_case ( snake_case : str ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
lowerCAmelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(snake_case , snake_case )
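# Numeric sanity check of the crash schedule above, recomputed with plain
# `math` (the readable names here are assumptions; the two scrambled helpers
# play the roles of alpha_sigma_to_t and get_crash_schedule): at t = 0.5,
# sigma = sin(pi/4)**2 = 0.5, alpha = sqrt(1 - 0.25) = sqrt(3)/2, and
# atan2(sigma, alpha) / pi * 2 = (pi/6) / pi * 2 = 1/3.
_sigma = math.sin(0.5 * math.pi / 2) ** 2
_alpha = (1 - _sigma**2) ** 0.5
assert abs(math.atan2(_sigma, _alpha) / math.pi * 2 - 1 / 3) < 1e-9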
class _snake_case ( a_ ):
pass
class _snake_case ( nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
lowerCAmelCase = DiffusionAttnUnetaD(_SCREAMING_SNAKE_CASE , n_attn_layers=4 )
lowerCAmelCase = deepcopy(self.diffusion )
lowerCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=_SCREAMING_SNAKE_CASE )
def snake_case ( snake_case : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = MODELS_MAP[model_name]['url']
os.system(F'wget {url} ./' )
return F'./{model_name}.ckpt'
_UpperCamelCase : Union[str, Any] = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
_UpperCamelCase : int = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
_UpperCamelCase : Union[str, Any] = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
_UpperCamelCase : Dict = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
_UpperCamelCase : int = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
_UpperCamelCase : Optional[int] = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def snake_case ( snake_case : str ) -> List[str]:
"""simple docstring"""
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def snake_case ( snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(snake_case ) and not isinstance(snake_case , snake_case ):
return name.replace(snake_case , snake_case )
elif name.startswith(snake_case ):
return [name.replace(snake_case , snake_case ) for v in value]
raise ValueError(F'Attn error with {name}' )
def snake_case ( snake_case : str , snake_case : Union[str, Any]=13 ) -> int:
"""simple docstring"""
lowerCAmelCase = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
lowerCAmelCase = 0
if string.startswith('net.3.' ):
depth += 1
lowerCAmelCase = string[6:]
elif string.startswith('net.' ):
lowerCAmelCase = string[4:]
while string.startswith('main.7.' ):
depth += 1
lowerCAmelCase = string[7:]
if string.startswith('main.' ):
lowerCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
lowerCAmelCase = string[:2]
lowerCAmelCase = string[2:]
else:
lowerCAmelCase = string[0]
lowerCAmelCase = string[1:]
if depth == max_depth:
lowerCAmelCase = MID_NUM_TO_LAYER[layer_num]
lowerCAmelCase = 'mid_block'
elif depth > 0 and int(snake_case ) < 7:
lowerCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
lowerCAmelCase = F'down_blocks.{depth}'
elif depth > 0 and int(snake_case ) > 7:
lowerCAmelCase = UP_NUM_TO_LAYER[layer_num]
lowerCAmelCase = F'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
lowerCAmelCase = DEPTH_0_TO_LAYER[layer_num]
lowerCAmelCase = F'up_blocks.{max_depth - 1}' if int(snake_case ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F'Naming error with {input_string} and string_left: {string_left}.' )
lowerCAmelCase = string_left[1:]
if "resnets" in new_layer:
lowerCAmelCase = convert_resconv_naming(snake_case )
elif "attentions" in new_layer:
lowerCAmelCase = convert_attn_naming(snake_case )
lowerCAmelCase = new_string_left
if not isinstance(snake_case , snake_case ):
lowerCAmelCase = prefix + '.' + new_layer + '.' + string_left
else:
lowerCAmelCase = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def snake_case ( snake_case : str ) -> Dict:
"""simple docstring"""
lowerCAmelCase = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
            # up- and downsample layers don't have trainable weights
continue
lowerCAmelCase = rename(snake_case )
# check if we need to transform from Conv => Linear for attention
if isinstance(snake_case , snake_case ):
lowerCAmelCase = transform_conv_attns(snake_case , snake_case , snake_case )
else:
lowerCAmelCase = v
return new_state_dict
def snake_case ( snake_case : int , snake_case : int , snake_case : Optional[int] ) -> Any:
"""simple docstring"""
if len(snake_case ) == 1:
if len(v.shape ) == 3:
# weight
lowerCAmelCase = v[:, :, 0]
else:
# bias
lowerCAmelCase = v
else:
# qkv matrices
lowerCAmelCase = v.shape[0]
lowerCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
lowerCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
lowerCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
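# A self-contained sketch of the qkv split transform_conv_attns performs above:
# a fused (3 * d, c, 1) attention conv weight is sliced into three (d, c)
# linear weights. The sizes are illustrative assumptions, not from a checkpoint.
_demo_qkv = torch.randn(3 * 8, 8, 1)
_demo_single = _demo_qkv.shape[0] // 3
_q, _k, _v = (_demo_qkv[i * _demo_single : (i + 1) * _demo_single, :, 0] for i in range(3))
assert _q.shape == _k.shape == _v.shape == (8, 8)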
def snake_case ( snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
lowerCAmelCase = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
lowerCAmelCase = download(snake_case )
lowerCAmelCase = MODELS_MAP[model_name]['sample_rate']
lowerCAmelCase = MODELS_MAP[model_name]['sample_size']
lowerCAmelCase = Object()
lowerCAmelCase = sample_size
lowerCAmelCase = sample_rate
lowerCAmelCase = 0
lowerCAmelCase = UNetaDModel(sample_size=snake_case , sample_rate=snake_case )
lowerCAmelCase = diffusers_model.state_dict()
lowerCAmelCase = DiffusionUncond(snake_case )
orig_model.load_state_dict(torch.load(args.model_path , map_location=snake_case )['state_dict'] )
lowerCAmelCase = orig_model.diffusion_ema.eval()
lowerCAmelCase = orig_model.state_dict()
lowerCAmelCase = rename_orig_weights(snake_case )
lowerCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
lowerCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(snake_case ) == 0, F'Problem with {renamed_minus_diffusers}'
assert all(k.endswith('kernel' ) for k in list(snake_case ) ), F'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
lowerCAmelCase = value.squeeze()
lowerCAmelCase = value
diffusers_model.load_state_dict(snake_case )
lowerCAmelCase = 100
lowerCAmelCase = 33
lowerCAmelCase = IPNDMScheduler(num_train_timesteps=snake_case )
lowerCAmelCase = torch.manual_seed(snake_case )
lowerCAmelCase = torch.randn([1, 2, config.sample_size] , generator=snake_case ).to(snake_case )
lowerCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=snake_case )[:-1]
lowerCAmelCase = get_crash_schedule(snake_case )
lowerCAmelCase = DanceDiffusionPipeline(unet=snake_case , scheduler=snake_case )
lowerCAmelCase = torch.manual_seed(33 )
lowerCAmelCase = pipe(num_inference_steps=snake_case , generator=snake_case ).audios
lowerCAmelCase = sampling.iplms_sample(snake_case , snake_case , snake_case , {} )
lowerCAmelCase = generated.clamp(-1 , 1 )
lowerCAmelCase = (generated - audio).abs().sum()
lowerCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , snake_case )
print('Diff max' , snake_case )
assert diff_max < 1e-3, F'Diff max: {diff_max} is too much :-/'
print(F'Conversion for {model_name} successful!' )
if __name__ == "__main__":
_UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
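    # NOTE: ``type=bool`` is a classic argparse footgun -- bool("False") is True,
    # so any non-empty string passed to --save is treated as truthy. The usual
    # fix is ``action="store_true"``; the flag above is kept as in the original.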
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
_UpperCamelCase : List[str] = parser.parse_args()
main(args)
| 284
| 1
|
from ..utils import DummyObject, requires_backends
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> int:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> str:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Any:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> int:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> str:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> int:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> int:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Any:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> int:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Any:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> int:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class __magic_name__ (metaclass=__lowercase ):
lowerCamelCase__ = ['''sentencepiece''']
def __init__( self , *_a , **_a ) -> Tuple:
requires_backends(self , ["sentencepiece"] )
| 226
|
from functools import lru_cache
def A(__a: int ):
lowerCAmelCase_ = 2
lowerCAmelCase_ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__a )
if n > 1:
factors.add(__a )
return factors
@lru_cache
def A(__a: int ):
return len(unique_prime_factors(__a ) )
def A(__a: list ):
return len(set(__a ) ) in (0, 1)
def A(__a: int ):
lowerCAmelCase_ = 2
while True:
# Increment each value of a generated range
lowerCAmelCase_ = [base + i for i in range(__a )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
lowerCAmelCase_ = [upf_len(__a ) for x in group]
checker.append(__a )
# If all numbers in the list are equal, return the group variable.
if equality(__a ):
return group
# Increment our base variable by 1
base += 1
def A(__a: int = 4 ):
lowerCAmelCase_ = run(__a )
return results[0] if len(__a ) else None
if __name__ == "__main__":
print(solution())
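# Small checks for the search above (this is Project Euler 47; the scrambled
# `A` helpers play the roles of unique_prime_factors / upf_len / run / solution).
# The first pair of consecutive integers with two distinct prime factors each is
# (14, 15) and the first triple is (644, 645, 646); the published answer for
# four factors is 134043. Recomputed here without the helpers above:
def _distinct_prime_factors(n: int) -> set:
    return {p for p in range(2, n + 1) if n % p == 0 and all(p % q for q in range(2, p))}

assert {len(_distinct_prime_factors(m)) for m in (14, 15)} == {2}
assert {len(_distinct_prime_factors(m)) for m in (644, 645, 646)} == {3}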
| 226
| 1
|
from math import factorial
def _A ( lowerCAmelCase_ : int = 20 ):
"""simple docstring"""
lowerCAmelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCAmelCase__ = n // 2
return int(factorial(lowerCAmelCase_ ) / (factorial(lowerCAmelCase_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCamelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
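# Sanity check of the closed form above: the count of lattice routes through an
# n x n grid is the central binomial coefficient C(2n, n); for n = 2 that is
# C(4, 2) = 6, and the classic n = 20 value is C(40, 20) = 137846528820.
from math import comb as _comb
assert _comb(4, 2) == 6
assert _comb(40, 20) == 137846528820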
| 61
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
A = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
A = 'main'
# Default branch name
A = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
A = 'aaaaaaa'
# This commit does not exist, so we should 404.
A = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
A = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
print("Bonjour!" )
yield
print("Au revoir!" )
class _a ( unittest.TestCase):
def __lowercase ( self : Dict ) -> Optional[int]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class _a ( unittest.TestCase):
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __lowercase ( self : Union[str, Any] , _lowercase : Optional[int] ) -> Optional[Any]:
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __lowercase ( self : Any , _lowercase : Any ) -> Dict:
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def __lowercase ( self : str , _lowercase : List[Any] ) -> Optional[int]:
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
def __lowercase ( self : Any ) -> Union[str, Any]:
self.assertEqual(find_labels(_lowercase ) , ["labels"] )
self.assertEqual(find_labels(_lowercase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(_lowercase ) , ["start_positions", "end_positions"] )
class _a ( SCREAMING_SNAKE_CASE__):
pass
self.assertEqual(find_labels(_lowercase ) , ["labels"] )
@require_tf
def __lowercase ( self : Optional[Any] ) -> int:
self.assertEqual(find_labels(_lowercase ) , ["labels"] )
self.assertEqual(find_labels(_lowercase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(_lowercase ) , ["start_positions", "end_positions"] )
class _a ( SCREAMING_SNAKE_CASE__):
pass
self.assertEqual(find_labels(_lowercase ) , ["labels"] )
@require_flax
def __lowercase ( self : Tuple ) -> List[Any]:
# Flax models don't have labels
self.assertEqual(find_labels(_lowercase ) , [] )
self.assertEqual(find_labels(_lowercase ) , [] )
self.assertEqual(find_labels(_lowercase ) , [] )
class _a ( SCREAMING_SNAKE_CASE__):
pass
self.assertEqual(find_labels(_lowercase ) , [] )
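# A minimal recap of the behavior the tests above pin down: find_labels
# inspects a model class's forward signature and returns the label argument
# names, which is why subclasses inherit their parent's labels and Flax models
# report an empty list.
if is_torch_available():
    assert find_labels(BertForSequenceClassification) == ["labels"]
    assert find_labels(BertForQuestionAnswering) == ["start_positions", "end_positions"]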
| 449
| 0
|
from __future__ import annotations
from fractions import Fraction
def snake_case ( snake_case__ :int , snake_case__ :int) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def snake_case ( snake_case__ :int) -> list[str]:
_A = []
_A = 11
_A = int("""1""" + """0""" * digit_len)
for num in range(snake_case__ , snake_case__):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(snake_case__ , snake_case__):
solutions.append(F'''{num}/{den}''')
den += 1
num += 1
_A = 10
return solutions
def snake_case ( snake_case__ :int = 2) -> int:
_A = 1.0
for fraction in fraction_list(snake_case__):
_A = Fraction(snake_case__)
result *= frac.denominator / frac.numerator
return int(snake_case__)
if __name__ == "__main__":
print(solution())
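# The four non-trivial digit-cancelling fractions the search above finds are
# 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so the
# denominator in lowest terms -- and the value printed here -- is 100.
assert Fraction(16, 64) * Fraction(19, 95) * Fraction(26, 65) * Fraction(49, 98) == Fraction(1, 100)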
| 708
|
from collections import defaultdict
def snake_case ( snake_case__ :int) -> int:
_A = 1
_A = True
for v in tree[start]:
if v not in visited:
ret += dfs(snake_case__)
if ret % 2 == 0:
cuts.append(snake_case__)
return ret
def snake_case ( ) -> Any:
dfs(1)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10, 9
_SCREAMING_SNAKE_CASE = defaultdict(list)
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
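    # How the count above works (this is the HackerRank "Even Tree" task): dfs
    # returns each subtree's size, and every vertex with an even subtree size
    # marks one edge that can be cut to its parent; the root's own even total is
    # appended too, hence the `- 1`. For this 10-node sample the expected output is 2.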
| 83
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ : int = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ : Dict = """"""
else:
lowerCAmelCase__ : List[Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : str = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : str = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : str = dct.pop(UpperCamelCase )
lowerCAmelCase__ : Dict = val
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase__ : Union[str, Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ : Dict = BitConfig(
global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=UpperCamelCase , )
lowerCAmelCase__ : Dict = ViTHybridConfig(backbone_config=UpperCamelCase , image_size=384 , num_labels=1000 )
lowerCAmelCase__ : int = False
# load original model from timm
lowerCAmelCase__ : Dict = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : List[Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = create_rename_keys(UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = """huggingface/label-files"""
lowerCAmelCase__ : Optional[int] = """imagenet-1k-id2label.json"""
lowerCAmelCase__ : Any = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : List[str] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Tuple = idalabel
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase__ : Any = ViTHybridModel(UpperCamelCase ).eval()
else:
lowerCAmelCase__ : Any = ViTHybridForImageClassification(UpperCamelCase ).eval()
model.load_state_dict(UpperCamelCase )
# create image processor
lowerCAmelCase__ : Any = create_transform(**resolve_data_config({} , model=UpperCamelCase ) )
lowerCAmelCase__ : Any = transform.transforms
lowerCAmelCase__ : Any = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
lowerCAmelCase__ : Optional[Any] = ViTHybridImageProcessor(
do_resize=UpperCamelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCamelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=UpperCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : str = transform(UpperCamelCase ).unsqueeze(0 )
lowerCAmelCase__ : str = processor(UpperCamelCase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase , UpperCamelCase )
# verify logits
with torch.no_grad():
lowerCAmelCase__ : str = model(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = outputs.logits
print("""Predicted class:""" , logits.argmax(-1 ).item() )
if base_model:
lowerCAmelCase__ : Union[str, Any] = timm_model.forward_features(UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCamelCase , outputs.pooler_output , atol=1e-3 )
else:
lowerCAmelCase__ : Union[str, Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
_lowerCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 565
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase = logging.getLogger(__name__)
_lowerCAmelCase = '''Hello world! cécé herlolip'''
_lowerCAmelCase = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = BertAbsConfig(
temp_dir=""".""" , finetune_bert=UpperCamelCase , large=UpperCamelCase , share_emb=UpperCamelCase , use_bert_emb=UpperCamelCase , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
lowerCAmelCase__ : int = torch.load(UpperCamelCase , lambda UpperCamelCase , UpperCamelCase : storage )
lowerCAmelCase__ : List[str] = AbsSummarizer(UpperCamelCase , torch.device("""cpu""" ) , UpperCamelCase )
original.eval()
lowerCAmelCase__ : Optional[Any] = BertAbsSummarizer(UpperCamelCase , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
lowerCAmelCase__ : Tuple = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
lowerCAmelCase__ : Optional[int] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCamelCase )) )
lowerCAmelCase__ : List[Any] = torch.tensor(UpperCamelCase ).unsqueeze(0 )
lowerCAmelCase__ : str = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(UpperCamelCase )) )
lowerCAmelCase__ : List[str] = torch.tensor(UpperCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowerCAmelCase__ : Dict = encoder_input_ids
lowerCAmelCase__ : Tuple = decoder_input_ids
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : List[Any] = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowerCAmelCase__ : Optional[Any] = original(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )[0]
lowerCAmelCase__ : Optional[Any] = original.generator(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = new_model(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )[0]
lowerCAmelCase__ : int = new_model.generator(UpperCamelCase )
lowerCAmelCase__ : str = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print("""Maximum absolute difference between model outputs: {:.2f}""".format(UpperCamelCase ) )
lowerCAmelCase__ : Union[str, Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print("""Maximum absolute difference between generator outputs: {:.2f}""".format(UpperCamelCase ) )
lowerCAmelCase__ : Dict = torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCAmelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 565
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_A : Union[str, Any] = logging.get_logger(__name__)
_A : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_A : Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCamelCase_ ( snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Optional[int] ) -> List[Any]:
'''simple docstring'''
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(lowerCamelCase_ , lowerCamelCase_ )
if weight_type is not None:
__lowerCAmelCase = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
else:
__lowerCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase_ ( snake_case_ : Any , snake_case_ : Tuple ) -> Optional[int]:
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
__lowerCAmelCase = None
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
elif name.split(""".""" )[0] == "proj":
__lowerCAmelCase = fairseq_model.proj
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(lowerCamelCase_ )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , lowerCamelCase_ )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
elif "weight" in name:
__lowerCAmelCase = """weight"""
else:
__lowerCAmelCase = None
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
continue
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def UpperCamelCase_ ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ) -> Any:
'''simple docstring'''
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
__lowerCAmelCase = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
__lowerCAmelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCamelCase_ )
def UpperCamelCase_ ( snake_case_ : str ) -> List[Any]:
'''simple docstring'''
__lowerCAmelCase = emb.weight.shape
__lowerCAmelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
__lowerCAmelCase = emb.weight.data
return lin_layer
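# A self-contained sketch of the embedding-to-linear conversion above (weight
# tying): the resulting output projection computes logits = hidden @ emb.weight.T.
# The sizes below are illustrative assumptions, not taken from any checkpoint.
_demo_emb = nn.Embedding(10, 4)
_demo_lin = nn.Linear(4, 10, bias=False)
_demo_lin.weight.data = _demo_emb.weight.data
assert torch.allclose(_demo_lin(torch.ones(4)), torch.ones(4) @ _demo_emb.weight.data.T)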
def UpperCamelCase_ ( snake_case_ : Tuple ) -> str:
'''simple docstring'''
with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" ) as f:
__lowerCAmelCase = f.readlines()
__lowerCAmelCase = [line.split(""" """ )[0] for line in lines]
__lowerCAmelCase = len(lowerCamelCase_ )
__lowerCAmelCase = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(lowerCamelCase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
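# The dict file parsed above follows the fairseq convention of one
# "token count" pair per line, with ids offset by the four specials. A tiny
# self-contained illustration with made-up tokens:
_demo_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_demo_vocab.update({w: i + 4 for i, w in enumerate(["hello", "world"])})
assert _demo_vocab["world"] == 5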
@torch.no_grad()
def UpperCamelCase_ ( snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : Any , ) -> Dict:
'''simple docstring'''
__lowerCAmelCase = WavaVecaConfig.from_pretrained(lowerCamelCase_ )
__lowerCAmelCase = SpeechaTextaConfig.from_pretrained(
lowerCamelCase_ , vocab_size=lowerCamelCase_ , decoder_layers=lowerCamelCase_ , do_stable_layer_norm=lowerCamelCase_ )
__lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
__lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
__lowerCAmelCase = model[0].eval()
# set weights for wav2vec2 encoder
__lowerCAmelCase = WavaVecaModel(lowerCamelCase_ )
__lowerCAmelCase = recursively_load_weights_wavaveca(model.encoder , lowerCamelCase_ )
__lowerCAmelCase = SpeechaTextaForCausalLM(lowerCamelCase_ )
__lowerCAmelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCamelCase_ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
__lowerCAmelCase = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowerCAmelCase = SpeechEncoderDecoderModel(encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
__lowerCAmelCase = False
# add projection layer
__lowerCAmelCase = nn.Parameter(projection_layer.weight )
__lowerCAmelCase = nn.Parameter(projection_layer.bias )
__lowerCAmelCase = create_vocab_dict(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , """vocab.json""" ) , """w""" ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
__lowerCAmelCase = SpeechaTextaTokenizer(os.path.join(lowerCamelCase_ , """vocab.json""" ) )
tokenizer.save_pretrained(lowerCamelCase_ )
__lowerCAmelCase = hf_wavavec.config.to_dict()
__lowerCAmelCase = tokenizer.pad_token_id
__lowerCAmelCase = tokenizer.bos_token_id
__lowerCAmelCase = tokenizer.eos_token_id
__lowerCAmelCase = """speech_to_text_2"""
__lowerCAmelCase = """wav2vec2"""
__lowerCAmelCase = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase_ )
hf_wavavec.save_pretrained(lowerCamelCase_ )
feature_extractor.save_pretrained(lowerCamelCase_ )
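# Example invocation (a sketch; the script name and paths below are
# placeholders, not files shipped with this converter):
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./converted_model
# The encoder/decoder config paths, vocab size and decoder depth fall back to
# the argparse defaults declared below.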
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 713
|
'''simple docstring'''
def solution( n : int = 1_00 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
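# Sanity check (from the Project Euler 6 statement): for the first ten natural
# numbers the sum of squares is 385 and the square of the sum is 55**2 = 3025,
# so solution(10) returns 3025 - 385 = 2640.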
if __name__ == "__main__":
print(f'{solution() = }')
| 330
| 0
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Any:
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
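# Illustrative usage (a sketch with toy shapes): given a config whose
# pad_token_id is 1, batched ids of shape (batch, seq_len) yield boolean
# attention masks of the same shape plus all-ones head masks:
#   inputs = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
#   assert inputs["attention_mask"].shape == input_ids.shape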
class __magic_name__ :
"""simple docstring"""
def __init__( self :Optional[Any] , snake_case :List[Any] , snake_case :int=13 , snake_case :List[Any]=7 , snake_case :Optional[int]=True , snake_case :int=False , snake_case :Optional[int]=99 , snake_case :Optional[int]=16 , snake_case :Any=2 , snake_case :Dict=4 , snake_case :Any=4 , snake_case :int="relu" , snake_case :str=0.1 , snake_case :List[str]=0.1 , snake_case :Tuple=0.0 , snake_case :Tuple=0.0 , snake_case :str=20 , snake_case :str=2 , snake_case :Union[str, Any]=1 , snake_case :List[str]=0 , ):
'''simple docstring'''
A_ : Optional[int] = parent
A_ : Any = batch_size
A_ : List[str] = seq_length
A_ : Any = is_training
A_ : str = use_labels
A_ : Tuple = vocab_size
A_ : List[Any] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : int = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : List[str] = encoder_layerdrop
A_ : Optional[int] = decoder_layerdrop
A_ : str = max_position_embeddings
A_ : Optional[Any] = eos_token_id
A_ : List[Any] = pad_token_id
A_ : Union[str, Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : Dict = self.eos_token_id # Eos Token
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, and which in turn results in
# position_ids being off by num_pad_tokens in past input
A_ : Optional[int] = input_ids.clamp(self.pad_token_id + 1 )
A_ : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 )
A_ : Union[str, Any] = self.get_config()
A_ : Any = prepare_mam_aaa_inputs_dict(snake_case , snake_case , snake_case )
return config, inputs_dict
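    # Note on the clamp trick above (illustrative): with pad_token_id = 1,
    # `ids.clamp(2)` lifts every sampled 0 or 1 up to 2, e.g. [[0, 5, 1]]
    # becomes [[2, 5, 2]], so the sequences are guaranteed to contain no pad
    # tokens and the position-id bookkeeping described above stays consistent.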
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
A_ , A_ : str = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any , snake_case :Any ):
'''simple docstring'''
A_ : Tuple = MaMaaaModel(config=snake_case ).get_decoder().to(snake_case ).eval()
A_ : Dict = inputs_dict["input_ids"]
A_ : Union[str, Any] = inputs_dict["attention_mask"]
A_ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
A_ : Union[str, Any] = model(snake_case , attention_mask=snake_case , head_mask=snake_case , use_cache=snake_case )
A_ , A_ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
A_ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A_ : List[str] = model(snake_case , attention_mask=snake_case )["last_hidden_state"]
A_ : Dict = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"last_hidden_state"
]
# select random slice
A_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-2 ) )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Any , snake_case :Any ):
'''simple docstring'''
A_ : Dict = MaMaaaModel(config=snake_case ).to(snake_case ).eval()
A_ : int = model(**snake_case )
A_ : Dict = outputs.encoder_last_hidden_state
A_ : List[Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : int = model.get_encoder()
encoder.save_pretrained(snake_case )
A_ : Any = MaMaaaEncoder.from_pretrained(snake_case ).to(snake_case )
A_ : List[Any] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : str = model.get_decoder()
decoder.save_pretrained(snake_case )
A_ : List[Any] = MaMaaaDecoder.from_pretrained(snake_case ).to(snake_case )
A_ : Tuple = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=snake_case , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class __magic_name__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__UpperCamelCase = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :Tuple , snake_case :Optional[int] , snake_case :Optional[int] , snake_case :str ):
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Dict = MaMaaaModelTester(self )
A_ : List[str] = ConfigTester(self , config_class=snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case )
A_ , A_ : Dict = model_class.from_pretrained(snake_case , output_loading_info=snake_case )
self.assertEqual(info["missing_keys"] , [] )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
A_ : List[Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
A_ : List[Any] = copy.deepcopy(self._prepare_for_class(snake_case , snake_case ) )
if not self.is_encoder_decoder:
A_ : Dict = inputs["input_ids"]
del inputs["input_ids"]
else:
A_ : Dict = inputs["input_ids"]
A_ : List[str] = inputs.get("decoder_input_ids" , snake_case )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , snake_case )
A_ : Dict = model.get_input_embeddings()
if not self.is_encoder_decoder:
A_ : Tuple = wte(snake_case )
else:
A_ : List[Any] = wte(snake_case )
A_ : Tuple = wte(snake_case )
with torch.no_grad():
model(**snake_case )[0]
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
A_ : Tuple = input_dict["input_ids"]
A_ : List[Any] = input_ids.ne(1 ).to(snake_case )
A_ : List[Any] = MaMaaaForConditionalGeneration(snake_case ).eval().to(snake_case )
if torch_device == "cuda":
model.half()
model.generate(snake_case , attention_mask=snake_case )
model.generate(num_beams=4 , do_sample=snake_case , early_stopping=snake_case , num_return_sequences=3 )
def _long_tensor( tok_lst ) -> Union[str, Any]:
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
_lowerCAmelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Any = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(snake_case )
A_ : int = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
A_ : Union[str, Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
A_ : Dict = prepare_mam_aaa_inputs_dict(model.config , snake_case , snake_case )
with torch.no_grad():
A_ : Optional[Any] = model(**snake_case )[0]
A_ : str = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , snake_case )
# change to expected output here
A_ : Union[str, Any] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(snake_case )
# change to intended input
A_ : int = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
A_ : List[Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
A_ : Any = prepare_mam_aaa_inputs_dict(model.config , snake_case , snake_case )
with torch.no_grad():
A_ : Optional[int] = model(**snake_case )[0]
A_ : int = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , snake_case )
# change to expected output here
A_ : str = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=snake_case )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=snake_case ) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(snake_case )
A_ : Optional[Any] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
A_ : Optional[Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
A_ : str = tokenizer(snake_case , padding=snake_case , return_tensors="pt" )
A_ : Union[str, Any] = model.generate(
input_ids=dct["input_ids"].to(snake_case ) , attention_mask=dct["attention_mask"].to(snake_case ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
A_ : str = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
A_ : Tuple = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=snake_case , skip_special_tokens=snake_case )
assert generated == expected_en
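    # Distilled from the test above (a sketch using the same public API):
    # M2M100 translates by forcing the target language id as the first
    # generated token:
    #   batch = tokenizer(src_texts, padding=True, return_tensors="pt")
    #   out = model.generate(**batch, num_beams=5,
    #                        forced_bos_token_id=tokenizer.get_lang_id("en"))
    #   translations = tokenizer.batch_decode(out, skip_special_tokens=True)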
| 454
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Any = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(snake_case )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : Dict = self.dummy_uncond_unet
A_ : Any = DDIMScheduler()
A_ : List[str] = self.dummy_vq_model
A_ : Union[str, Any] = LDMPipeline(unet=snake_case , vqvae=snake_case , scheduler=snake_case )
ldm.to(snake_case )
ldm.set_progress_bar_config(disable=snake_case )
A_ : Any = torch.manual_seed(0 )
A_ : Optional[Any] = ldm(generator=snake_case , num_inference_steps=2 , output_type="numpy" ).images
A_ : Dict = torch.manual_seed(0 )
A_ : Any = ldm(generator=snake_case , num_inference_steps=2 , output_type="numpy" , return_dict=snake_case )[0]
A_ : int = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
A_ : List[str] = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : int = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(snake_case )
ldm.set_progress_bar_config(disable=snake_case )
A_ : int = torch.manual_seed(0 )
A_ : List[str] = ldm(generator=snake_case , num_inference_steps=5 , output_type="numpy" ).images
A_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
A_ : Any = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
A_ : List[Any] = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 454
| 1
|
"""simple docstring"""
def one_pence( ):
    return 1
def two_pence( x ):
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence( x ):
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence( x ):
    return 0 if x < 0 else ten_pence(x - 1_0 ) + five_pence(x )
def twenty_pence( x ):
    return 0 if x < 0 else twenty_pence(x - 2_0 ) + ten_pence(x )
def fifty_pence( x ):
    return 0 if x < 0 else fifty_pence(x - 5_0 ) + twenty_pence(x )
def one_pound( x ):
    return 0 if x < 0 else one_pound(x - 1_0_0 ) + fifty_pence(x )
def two_pound( x ):
    return 0 if x < 0 else two_pound(x - 2_0_0 ) + one_pound(x )
def solution( x = 2_0_0 ):
    return two_pound(x )
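# A minimal iterative alternative (my sketch, not part of the original
# solution): the classic coin-change dynamic programme over the same
# denominations, which avoids the deep recursion of the functions above.
def solution_dp(pence: int = 2_0_0) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[v] = number of ways to make v pence
    for coin in coins:
        for value in range(coin, pence + 1):
            ways[value] += ways[value - coin]
    return ways[pence]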
if __name__ == "__main__":
print(solution(int(input().strip())))
| 713
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = SamImageProcessor()
UpperCAmelCase__ = SamProcessor(lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor
def __lowerCAmelCase ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_image_processor(do_normalize=lowerCamelCase__ ,padding_value=1.0 )
UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase__ ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='np' )
UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
@require_torch
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
UpperCAmelCase__ = [torch.ones((1, 3, 5, 5) )]
UpperCAmelCase__ = [[1_764, 2_646]]
UpperCAmelCase__ = [[683, 1_024]]
UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
UpperCAmelCase__ = processor.post_process_masks(
lowerCamelCase__ ,torch.tensor(lowerCamelCase__ ) ,torch.tensor(lowerCamelCase__ ) )
self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
# should also work with np
UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) )
self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
UpperCAmelCase__ = [[1, 0], [0, 1]]
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) )
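    # Shape intuition for post_process_masks (illustrative numbers from this
    # test): low-resolution masks of shape (1, 3, 5, 5), predicted on an input
    # resized to 683x1024, are interpolated back to the original 1764x2646
    # image, giving (1, 3, 1764, 2646); mismatched size lists raise an error,
    # as the assertRaises block above checks.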
@require_vision
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = SamImageProcessor()
UpperCAmelCase__ = SamProcessor(lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor
def __lowerCAmelCase ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_image_processor(do_normalize=lowerCamelCase__ ,padding_value=1.0 )
UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase__ ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='np' )
UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
@require_tf
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
UpperCAmelCase__ = [tf.ones((1, 3, 5, 5) )]
UpperCAmelCase__ = [[1_764, 2_646]]
UpperCAmelCase__ = [[683, 1_024]]
UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='tf' )
self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
UpperCAmelCase__ = processor.post_process_masks(
lowerCamelCase__ ,tf.convert_to_tensor(lowerCamelCase__ ) ,tf.convert_to_tensor(lowerCamelCase__ ) ,return_tensors='tf' ,)
self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
# should also work with np
UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )]
UpperCAmelCase__ = processor.post_process_masks(
lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ,return_tensors='tf' )
self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) )
UpperCAmelCase__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
UpperCAmelCase__ = processor.post_process_masks(
lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ,return_tensors='tf' )
@require_vision
@require_torchvision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = SamImageProcessor()
UpperCAmelCase__ = SamProcessor(lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Dict ,**lowerCamelCase__ : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor
def __lowerCAmelCase ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
UpperCAmelCase__ = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.floataa )
UpperCAmelCase__ = [tf.convert_to_tensor(lowerCamelCase__ )]
UpperCAmelCase__ = [torch.tensor(lowerCamelCase__ )]
UpperCAmelCase__ = [[1_764, 2_646]]
UpperCAmelCase__ = [[683, 1_024]]
UpperCAmelCase__ = processor.post_process_masks(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='tf' )
UpperCAmelCase__ = processor.post_process_masks(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='pt' )['pixel_values'].numpy()
UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='pt' )['pixel_values'].numpy()
UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='tf' )['pixel_values'].numpy()
UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
| 632
| 0
|
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path , articles ) -> Optional[Any]:
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
lowerCAmelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
lowerCAmelCase : Optional[int] = """sshleifer/bart-tiny-random"""
lowerCAmelCase : List[Any] = """sshleifer/tiny-mbart"""
lowerCAmelCase : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _UpperCamelCase ( TestCasePlus ):
'''simple docstring'''
    def run_eval_tester( self , model ) -> Optional[int]:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys , "argv" , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval( self ) -> str:
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow( self , model ) -> Optional[Any]:
        self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search( self , model ) -> Optional[int]:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / "scores.json" )
        reference_path = str(tmp_dir / "val.target" )
        _dump_articles(input_file_name , text["en"] )
        _dump_articles(reference_path , text["de"] )
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'''
            run_eval_search.py
            {model}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
        with patch.object(sys , "argv" , testargs ):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu" )
        else:
            expected_strings.extend(ROUGE_KEYS )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name ).exists()
        os.remove(Path(output_file_name ) )
| 372
|
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name , num_meta4D_last_stage ) -> str:
    new_name = old_name
    if "patch_embed" in old_name:
        _ , layer , param = old_name.split("." )
        if layer == "0":
            new_name = old_name.replace("0" ,"convolution1" )
        elif layer == "1":
            new_name = old_name.replace("1" ,"batchnorm_before" )
        elif layer == "3":
            new_name = old_name.replace("3" ,"convolution2" )
        else:
            new_name = old_name.replace("4" ,"batchnorm_after" )
    if "network" in old_name and re.search(r"\d\.\d" ,old_name ):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num ,old_name ) ):
            match = re.search(r"\d\.\d\d." ,old_name ).group()
        else:
            match = re.search(r"\d\.\d." ,old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match ,"" )
            trimmed_name = trimmed_name.replace("network" ,match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match ,"" )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network" ,"meta4D_layers.blocks." + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("network" ,"meta3D_layers.blocks." + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1" ,"layernorm1" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2" ,"layernorm2" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1" ,"linear_in" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2" ,"linear_out" )
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d." ,old_name ):
        new_name = old_name.replace("network" ,"intermediate_stages" )
    if "fc" in new_name:
        new_name = new_name.replace("fc" ,"convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1" ,"batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2" ,"batchnorm_after" )
    if "proj" in new_name:
        new_name = new_name.replace("proj" ,"projection" )
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head" ,"distillation_classifier" )
    elif "head" in new_name:
        new_name = new_name.replace("head" ,"classifier" )
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm" ,"layernorm" )
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
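# Illustrative traces through rename_key (my examples; the exact outputs for
# "network" keys depend on the checkpoint's layer layout and
# num_meta4D_last_stage):
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"
#   "head.weight"          -> "classifier.weight"
#   "norm.weight"          -> "efficientformer.layernorm.weight"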
def convert_torch_checkpoint(checkpoint , num_meta4D_last_stage ) -> Optional[Any]:
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img() -> Optional[int]:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url ,stream=True ).raw )
    return image
def convert_efficientformer_checkpoint(checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ) -> List[Any]:
    orig_state_dict = torch.load(checkpoint_path ,map_location="cpu" )["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict ,num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} ,crop_size={"height": crop_size, "width": crop_size} ,resample=pillow_resamplings["bicubic"] ,)
    pixel_values = processor(images=image ,return_tensors="pt" ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size ,interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN ,IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values ,pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1_0_0_0)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :1_0] ,expected_logits ,atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :1_0] ,expected_logits ,atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
    processor.save_pretrained(pytorch_dump_path )
    print(f'''Processor successfully saved at {pytorch_dump_path}''' )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' ,commit_message="Add model" ,use_temp_dir=True ,)
        processor.push_to_hub(
            repo_id=f'''Bearnardd/{pytorch_dump_path}''' ,commit_message="Add image processor" ,use_temp_dir=True ,)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 372
| 1
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=0.2 , UpperCamelCase__=0.2 ) -> Any:
'''simple docstring'''
snake_case : Tuple = bp_numa
snake_case : Dict = bp_numa
snake_case : Tuple = bp_numa
snake_case : Optional[Any] = conva_get[:2]
snake_case : Optional[Any] = conva_get[2]
snake_case : List[str] = size_pa
snake_case : Dict = rate_w
snake_case : Any = rate_t
snake_case : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
snake_case : Tuple = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
snake_case : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
snake_case : str = -2 * np.random.rand(self.conva[1] ) + 1
snake_case : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1
snake_case : Optional[int] = -2 * np.random.rand(self.num_bpa ) + 1
def lowerCamelCase ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
snake_case : Any = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(UpperCamelCase__ , "wb" ) as f:
pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
print(F'Model saved: {save_path}' )
@classmethod
def lowerCamelCase ( cls , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
with open(UpperCamelCase__ , "rb" ) as f:
snake_case : Tuple = pickle.load(UpperCamelCase__ ) # noqa: S301
snake_case : Optional[Any] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
snake_case : Any = model_dic.get("size_pooling1" )
snake_case : Optional[int] = model_dic.get("num_bp1" )
snake_case : Any = model_dic.get("num_bp2" )
snake_case : Tuple = model_dic.get("num_bp3" )
snake_case : List[str] = model_dic.get("rate_weight" )
snake_case : Tuple = model_dic.get("rate_thre" )
# create model instance
snake_case : Optional[Any] = CNN(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# modify model parameter
snake_case : Dict = model_dic.get("w_conv1" )
snake_case : Union[str, Any] = model_dic.get("wkj" )
snake_case : Tuple = model_dic.get("vji" )
snake_case : int = model_dic.get("thre_conv1" )
snake_case : Optional[int] = model_dic.get("thre_bp2" )
snake_case : List[str] = model_dic.get("thre_bp3" )
return conv_ins
def lowerCamelCase ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return round(UpperCamelCase__ , 3 )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
snake_case : int = convs[0]
snake_case : Any = convs[1]
snake_case : Optional[int] = np.shape(UpperCamelCase__ )[0]
# get the data slice of original image data, data_focus
snake_case : Any = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__ ):
snake_case : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCamelCase__ )
        # calculate the feature map of every single kernel, and save them as a list of matrices
snake_case : List[Any] = []
snake_case : Tuple = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(UpperCamelCase__ ):
snake_case : List[Any] = []
for i_focus in range(len(UpperCamelCase__ ) ):
snake_case : Dict = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCamelCase__ ) )
snake_case : Union[str, Any] = np.asmatrix(UpperCamelCase__ ).reshape(
UpperCamelCase__ , UpperCamelCase__ )
data_featuremap.append(UpperCamelCase__ )
        # expanding the data slice to one dimension
snake_case : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCamelCase__ ) )
snake_case : List[str] = np.asarray(UpperCamelCase__ )
return focus_list, data_featuremap
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="average_pool" ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[Any] = len(featuremaps[0] )
snake_case : List[str] = int(size_map / size_pooling )
snake_case : List[str] = []
for i_map in range(len(UpperCamelCase__ ) ):
snake_case : Tuple = featuremaps[i_map]
snake_case : Union[str, Any] = []
for i_focus in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
for j_focus in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCamelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCamelCase__ ) )
snake_case : Optional[int] = np.asmatrix(UpperCamelCase__ ).reshape(UpperCamelCase__ , UpperCamelCase__ )
featuremap_pooled.append(UpperCamelCase__ )
return featuremap_pooled
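    # Worked example (illustrative): average-pooling a 4x4 feature map with
    # size_pooling = 2 averages each non-overlapping 2x2 block, so
    #   [[1, 3, 2, 4],
    #    [5, 3, 4, 4],    ->    [[3.0, 3.5],
    #    [6, 8, 7, 9],           [6.0, 6.5]]
    #    [4, 6, 5, 5]]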
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
snake_case : List[str] = []
for i in range(len(UpperCamelCase__ ) ):
snake_case : str = np.shape(data[i] )
snake_case : Optional[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
snake_case : Any = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCamelCase__ )
snake_case : int = np.asarray(UpperCamelCase__ )
return data_expanded
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = np.asarray(UpperCamelCase__ )
snake_case : str = np.shape(UpperCamelCase__ )
snake_case : List[str] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = []
snake_case : Tuple = 0
for i_map in range(UpperCamelCase__ ):
snake_case : Any = np.ones((size_map, size_map) )
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
for j in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Optional[int] = pd_pool[
i_pool
]
snake_case : List[Any] = i_pool + 1
snake_case : int = np.multiply(
UpperCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(UpperCamelCase__ )
return pd_all
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=bool ) -> Tuple:
'''simple docstring'''
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(UpperCamelCase__ )) )
print((" - - Shape: Teach_Data ", np.shape(UpperCamelCase__ )) )
snake_case : Union[str, Any] = 0
snake_case : Union[str, Any] = []
snake_case : int = 1_0000
while rp < n_repeat and mse >= error_accuracy:
snake_case : Optional[Any] = 0
print(F'-------------Learning Time {rp}--------------' )
for p in range(len(UpperCamelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
snake_case : Optional[int] = np.asmatrix(datas_train[p] )
snake_case : Union[str, Any] = np.asarray(datas_teach[p] )
snake_case ,snake_case : Any = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case : int = self.pooling(UpperCamelCase__ , self.size_poolinga )
snake_case : List[Any] = np.shape(UpperCamelCase__ )
snake_case : List[str] = self._expand(UpperCamelCase__ )
snake_case : Tuple = data_bp_input
snake_case : Tuple = np.dot(UpperCamelCase__ , self.vji.T ) - self.thre_bpa
snake_case : Tuple = self.sig(UpperCamelCase__ )
snake_case : Tuple = np.dot(UpperCamelCase__ , self.wkj.T ) - self.thre_bpa
snake_case : Optional[Any] = self.sig(UpperCamelCase__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
snake_case : Union[str, Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCamelCase__ , (1 - bp_outa) ) )
snake_case : Dict = np.multiply(
np.dot(UpperCamelCase__ , self.wkj ) , np.multiply(UpperCamelCase__ , (1 - bp_outa) ) )
snake_case : Tuple = np.dot(UpperCamelCase__ , self.vji )
snake_case : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
snake_case : str = pd_conva_pooled.T.getA().tolist()
snake_case : str = self._calculate_gradient_from_pool(
UpperCamelCase__ , UpperCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
snake_case : int = self._expand_mat(pd_conva_all[k_conv] )
snake_case : List[Any] = self.rate_weight * np.dot(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
snake_case : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
snake_case : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
snake_case : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
snake_case : List[Any] = self.thre_bpa - pd_k_all * self.rate_thre
snake_case : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
snake_case : Dict = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
snake_case : Dict = rp + 1
snake_case : Optional[int] = error_count / patterns
all_mse.append(UpperCamelCase__ )
def draw_error():
snake_case : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(UpperCamelCase__ , "+-" )
plt.plot(UpperCamelCase__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(UpperCamelCase__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
snake_case : int = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(UpperCamelCase__ )) )
for p in range(len(UpperCamelCase__ ) ):
snake_case : List[Any] = np.asmatrix(datas_test[p] )
snake_case ,snake_case : Tuple = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case : Any = self.pooling(UpperCamelCase__ , self.size_poolinga )
snake_case : int = self._expand(UpperCamelCase__ )
snake_case : Optional[Any] = data_bp_input
snake_case : List[str] = bp_outa * self.vji.T - self.thre_bpa
snake_case : Any = self.sig(UpperCamelCase__ )
snake_case : Dict = bp_outa * self.wkj.T - self.thre_bpa
snake_case : Optional[Any] = self.sig(UpperCamelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
snake_case : Dict = [list(map(self.do_round , UpperCamelCase__ ) ) for each in produce_out]
return np.asarray(UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = np.asmatrix(UpperCamelCase__ )
snake_case ,snake_case : str = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case : int = self.pooling(UpperCamelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 117
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def __lowerCAmelCase ( lowercase : Accelerator , lowercase : int = 16 ) -> Union[str, Any]:
"""simple docstring"""
snake_case : int = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case : str = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case : Any = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case : str = 16
elif accelerator.mixed_precision != "no":
snake_case : List[Any] = 8
else:
snake_case : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
snake_case : Any = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
snake_case : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
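# A worked example of the padding choices in `collate_fn` above (illustrative only): on GPU with
# fp16, `pad_to_multiple_of=8` rounds a longest-in-batch length of 61 tokens up to 64, keeping
# tensor shapes friendly to Tensor Cores; on TPU, `max_length=128` fixes every batch to the same
# shape so XLA does not recompile the graph for each new sequence length.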
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
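    # Worked example (illustrative): with the default config below, batch_size is 16, so nothing
    # changes; if batch_size were 64, gradient_accumulation_steps would become 64 // 16 = 4 and
    # each forward pass would see 16 samples, preserving the effective batch size of 64.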
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )
    # Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored.
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`.
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`.
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
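# A minimal launch sketch (the file name `tracking_example.py` is hypothetical; see the accelerate
# examples readme linked above for the canonical instructions):
#
#   accelerate launch tracking_example.py --with_tracking --project_dir logs
#
# `accelerate launch` handles single CPU/GPU, multi-GPU and TPU execution with the same script;
# because `log_with="all"` is passed to `Accelerator`, every tracker available in the environment
# (e.g. TensorBoard, Weights & Biases) is initialized automatically.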
| 117
| 1
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
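# Note on the optimization profile above: `profile.set_shape(name, min, opt, max)` takes three
# shapes; passing INPUT_SHAPE for all three builds a static-shape engine, so every inference call
# must use exactly (eval_batch_size, max_seq_length) inputs. The serialized plan written to
# `engine_name` is reloaded below via `trt.Runtime(...).deserialize_cuda_engine`.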
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
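# The host buffers handed to `model_infer` are page-locked (`cuda.pagelocked_empty` below), which
# is what allows `memcpy_dtoh_async` to perform a true asynchronous DMA transfer; with ordinary
# pageable memory the copies would silently serialize with the compute stream.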
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make
    # the truncation of the context fail (the tokenized question will take a lot of space). So we
    # remove that left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans; this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set the offset_mapping entries that are not part of the context to None, so it's easy to
        # determine whether a token position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
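# Worked example (illustrative): with max_seq_length=384 and doc_stride=128, a 1000-token context
# is split into several overlapping features; `overflow_to_sample_mapping` records which original
# example each feature came from, and the offset mapping lets us map predicted token spans back to
# character positions in that example's context.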
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
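    # `trt.volume` multiplies the dimensions of the binding's shape; times the dtype's itemsize
    # this gives the byte count to reserve, e.g. an (8, 384) int32 binding needs 8 * 384 * 4 = 12288 bytes.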
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (page-locked host memory plus matching device buffers)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 97
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timesformer'''] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 216
| 0
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
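    # The assertions above form the standard KV-cache consistency check: decoding only the new
    # tokens with `past_key_values` must reproduce, within 1e-3, the hidden states obtained by
    # re-running the full concatenated sequence from scratch.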
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 398
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
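# Taken together, the four functions above cover the access patterns that matter in practice:
# sequential row access, sliced batch access, and the same two again under a non-default output
# format. `get_duration` (imported from utils) is assumed here to return the wall-clock duration
# of the decorated call, which is what gets recorded below.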
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 398
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666
|
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 1_2.7_0,
"""T""": 9.0_6,
"""A""": 8.1_7,
"""O""": 7.5_1,
"""I""": 6.9_7,
"""N""": 6.7_5,
"""S""": 6.3_3,
"""H""": 6.0_9,
"""R""": 5.9_9,
"""D""": 4.2_5,
"""L""": 4.0_3,
"""C""": 2.7_8,
"""U""": 2.7_6,
"""M""": 2.4_1,
"""W""": 2.3_6,
"""F""": 2.2_3,
"""G""": 2.0_2,
"""Y""": 1.9_7,
"""P""": 1.9_3,
"""B""": 1.2_9,
"""V""": 0.9_8,
"""K""": 0.7_7,
"""J""": 0.1_5,
"""X""": 0.1_5,
"""Q""": 0.1_0,
"""Z""": 0.0_7,
}
ETAOIN = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
LETTERS = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
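# A perfect match against English letter frequencies scores 12: one point for each of ETAOIN's six
# most common letters found among the six most frequent letters of the message, plus one point for
# each of the six least common (VKJXQZ) found among the message's six least frequent letters.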
if __name__ == "__main__":
import doctest
doctest.testmod()
| 501
| 0
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 668
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
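    # Reading the pipeline above: power-2 spectrogram -> Slaney-scale mel filter bank -> dB scale
    # (80 dB range), drop the final column, shift by -20 dB, then compress into [-1, 1] via
    # clip(log_spec / 40, -2, 0) + 1.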
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 668
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 100
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
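# Note the tie-breaking above: letters with equal counts are sorted by `ETAOIN.find` in reverse,
# so within a frequency bucket the letters least common in English come first. This makes the
# returned order fully deterministic for any input message.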
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220
| 0
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV weight/bias from Megatron's layout into the row
    ordering that transformers' GPT-2 attention expects."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
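# Shape sketch (illustrative, values assumed): for a checkpoint_version >= 2.0
# fused QKV weight with num_heads=16 and hidden_size_per_head=64, `param` arrives
# as [16 * 3 * 64, 1024]; the view/transpose above regroups the rows so that all
# query rows come first, then all key rows, then all value rows, which is the
# layout the D x 3D `c_attn` matrix needs after the final transpose below.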
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)"
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model."
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
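# Usage sketch (an assumption, following the standard transformers pattern rather
# than anything defined in this module): a collator turns a list of examples into
# a padded, batched dict of tensors, e.g.
#   collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
#   batch = collator([tokenized_dataset[i] for i in range(8)])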
| 21
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
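# Note (added): with the `_LazyModule` indirection above, a statement such as
# `from transformers.models.layoutxlm import LayoutXLMProcessor` only imports
# the underlying submodule the first time the attribute is actually accessed.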
| 687
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 116
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 417
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    """Image processor with resize, center-crop, rescale and BGR channel flipping."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
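# Shape sketch (illustrative, values assumed): for logits of shape
# (batch, num_labels, h, w) and target_sizes like [(512, 683)], each entry of
# the returned list is a (512, 683) tensor of per-pixel class indices.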
| 417
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the list of (old, new) key pairs mapping DiT names to BEiT names."""
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", "beit.embeddings.cls_token"),
(F"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
(F"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
(F"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
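# Example pair produced above (traced from the loop, i = 0, is_semantic=False):
# ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")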
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each fused QKV projection into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Load the standard COCO test image used across conversion scripts."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
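# Example invocation (hypothetical file name and output path, shown only as a sketch;
# the checkpoint URL is this script's default):
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base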
| 642
|
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 642
| 1
|
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """For each character, draw a random key k and store (ord(c) + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: the character code is (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
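    # Added sanity sketch (not in the original): since decrypt inverts
    # (i + k) * k exactly, a round trip must reproduce the input text.
    assert Onepad().decrypt(*Onepad().encrypt("round trip")) == "round trip"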
| 713
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Map (alpha, sigma) noise levels to a timestep in [0, 1]."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """The "crash" noise schedule used by the original dance-diffusion models."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
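# Worked endpoints (an added check, derived from the definitions above): at t=0,
# sigma=0 and alpha=1, so atan2(0, 1) / pi * 2 == 0; at t=1, sigma=1 and alpha=0,
# so atan2(1, 0) / pi * 2 == 1. The crash schedule therefore maps [0, 1] onto itself.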
class Object(object):
    """Bare namespace used to carry model config attributes."""

    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
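# Example mapping (traced from the rules above): "timestep_embed.weight"
# short-circuits to "time_proj.weight" before any depth bookkeeping runs.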
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
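# Why the trailing [:, :, 0] slice (context note, an assumption about the source
# model): the original attention blocks use kernel-size-1 Conv1d layers, whose
# weights have shape (out, in, 1); dropping the last axis yields the (out, in)
# matrix that the corresponding nn.Linear in diffusers expects.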
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
| 567
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 11
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 485
| 0
|
"""
Project Euler Problem 74: https://projecteuler.net/problem=74

Count the digit-factorial chains that contain exactly `chain_length`
non-repeating terms, over all starting numbers below `number_limit`.
"""
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts the number to a string to iterate on its digits and adds their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If the chain contains the exact amount of elements, increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
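    # Per Project Euler 74, the expected count with the defaults above
    # (chains of exactly 60 terms from starting numbers below one million) is 402.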
| 709
|
"""Random graph generators (Erdős–Rényi style G(n, p) sampling)."""
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a graph on `vertices_number` vertices where each possible edge
    is included independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each pair of vertices, add the edge from i to j when the randomly
    # generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # for an undirected graph, add the reverse edge from j to i too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate the complete graph on `vertices_number` vertices."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}
if __name__ == "__main__":
import doctest
doctest.testmod()
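    # Hedged usage sketch: seed the module-level RNG so the sampled graph is
    # reproducible, then build a sparse undirected graph on 5 vertices.
    random.seed(1)
    print(random_graph(5, 0.5))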
| 465
| 0
|
"""Boruvka's algorithm for the minimum spanning tree of a weighted graph."""


class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights pairwise distinct while keeping their order."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # equal ranks: attach root2 under root1 and bump root1's rank
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of `graph` (distinct weights assumed)."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
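

# Hedged usage sketch with made-up, distinct edge weights: build a small
# weighted graph and print its Boruvka minimum spanning tree.
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)])
    print(Graph.boruvka_mst(g))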
| 41
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
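# Minimal usage note for the lazy module above: a plain
#     from transformers.models.vit_mae import ViTMAEModel
# only materializes the requested symbol (and requires torch to be installed).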
| 333
| 0
|
import os
def solution():
    """Returns the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    total_score = 0
    name_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
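

# Minimal usage sketch (DonutSwinModel comes from the sibling modeling file):
#
#     from transformers import DonutSwinConfig, DonutSwinModel
#
#     configuration = DonutSwinConfig()      # donut-base style architecture
#     model = DonutSwinModel(configuration)  # randomly initialized weights
#     configuration = model.config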
| 114
| 0
|
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND: returns 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 81
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end) -> np.ndarray:
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
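    # Hedged usage sketch: integrate y' = y from x = 0 to 1 with step 0.01;
    # the final entry should approximate e ≈ 2.71828.
    print(runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1])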
| 546
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 464
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True.
        """
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 464
| 1
|
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
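    # Sanity check (the known example from Project Euler 43): 1406357289 is a
    # 0-9 pandigital number with the substring-divisibility property.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))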
| 456
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def UpperCamelCase (lowercase_: "pyspark.sql.DataFrame" , lowercase_: List[int] , ) -> Dict:
import pyspark
def generate_fn():
A__ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
A__ : List[Any] = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" )
A__ : Optional[int] = partition_df.collect()
A__ : Any = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __A ( self , A__ , A__ = "arrow" , A__ = None , A__ = None , **A__ , ):
self._validate_cache_dir()
A__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(A__ )
A__ : Any = not is_remote_filesystem(self._fs )
A__ : Optional[int] = os.path.join if is_local else posixpath.join
A__ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
A__ : Any = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
A__ : Any = path_join(self._output_dir , A__ )
A__ : Tuple = 0
A__ : str = 0
A__ : List[Any] = 0
A__ : List[Any] = []
A__ : Optional[Any] = []
for task_id, content in self._prepare_split_single(A__ , A__ , A__ ):
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) : List[Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(A__ )
A__ : Optional[int] = total_num_examples
A__ : Union[str, Any] = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
A__ : int = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
A__ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
A__ , A__ , A__ , ):
rename(
A__ , fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , F"""{global_shard_id:05d}""" ).replace("""NNNNN""" , F"""{total_shards:05d}""" ) , )
A__ : List[Any] = []
A__ : Union[str, Any] = 0
for i in range(len(A__ ) ):
A__ , A__ : Optional[int] = task_id_and_num_shards[i]
for shard_id in range(A__ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(A__ , len(A__ ) ).map(lambda A__ : _rename_shard(*A__ ) ).collect()
else:
# don't use any pattern
A__ : List[Any] = 0
A__ : List[str] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F"""{shard_id:05d}""" ).replace("""TTTTT""" , F"""{task_id:05d}""" ) , fpath.replace(A__ , """""" ) , )
def __A ( self , A__ , ):
return SparkExamplesIterable(self.df )
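

# Hedged usage note: this builder is normally reached through the public
# `Dataset.from_spark` helper rather than instantiated directly, e.g.
#
#     from datasets import Dataset
#     df = spark.createDataFrame([("a",), ("b",)], ["text"])  # active SparkSession assumed
#     ds = Dataset.from_spark(df)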
| 456
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 707
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCAmelCase = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
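    # Illustrative sketch, not part of the original test file: the "# Copied from"
    # header exercised above sits directly over a class or function in a modeling
    # file and is what is_copy_consistent parses. Assuming a hypothetical TestModel,
    # a consistent copy would look like:
    #
    #   # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel
    #   class TestModelLMPredictionHead(nn.Module):
    #       ...  # body identical to BertLMPredictionHead, with "Bert" renamed to "TestModel"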
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 259
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__SCREAMING_SNAKE_CASE : Dict = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
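# Illustrative usage sketch (assumed behaviour, not part of this __init__): with the
# _LazyModule pattern above, importing the package is cheap, and the heavy,
# torch-dependent submodules are only loaded on first attribute access:
#
#   from transformers.models import gpt_neox_japanese
#   # nothing from modeling_gpt_neox_japanese has been imported yet
#   config = gpt_neox_japanese.GPTNeoXJapaneseConfig()  # triggers the real import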
| 661
|
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : int = 10 ) -> str:
if not isinstance(lowercase_ , lowercase_ ) or n < 0:
raise ValueError('''Invalid input''' )
_lowerCamelCase = 10**n
_lowerCamelCase = 2_84_33 * (pow(2 , 7_83_04_57 , lowercase_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 661
| 1
|
"""simple docstring"""
import random
class lowercase :
@staticmethod
def a_ ( _lowerCamelCase : str ):
"""simple docstring"""
A_ : str = [ord(_lowerCamelCase ) for i in text]
A_ : Optional[Any] = []
A_ : Optional[int] = []
for i in plain:
A_ : List[str] = random.randint(1 , 3_00 )
A_ : Union[str, Any] = (i + k) * k
cipher.append(_lowerCamelCase )
key.append(_lowerCamelCase )
return cipher, key
@staticmethod
def a_ ( _lowerCamelCase : list[int] , _lowerCamelCase : list[int] ):
"""simple docstring"""
A_ : int = []
for i in range(len(_lowerCamelCase ) ):
A_ : Union[str, Any] = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_lowerCamelCase ) )
return "".join(_lowerCamelCase )
if __name__ == "__main__":
_lowerCamelCase , _lowerCamelCase : Optional[Any] = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
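# Illustrative round-trip check (not in the original file): decryption inverts
# encryption exactly, because c = (p + k) * k implies p = (c - k**2) / k with no
# remainder:
#
#   c, k = Onepad().encrypt("any text")
#   assert Onepad().decrypt(c, k) == "any text"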
| 361
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase):
__lowerCAmelCase : int = StableDiffusionXLImgaImgPipeline
__lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__lowerCAmelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {"""latents"""}
__lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time",
            addition_time_embed_dim=8, transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)  # flag value assumed
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
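# Illustrative usage sketch (assumed pipeline API, mirroring the test above):
# encode_prompt returns four tensors that plug back into __call__ under the same
# names, so callers can precompute embeddings once and reuse them across calls.
# Here `init_image` stands for any preprocessed input image:
#
#   embeds = sd_pipe.encode_prompt(["a prompt"], negative_prompt=["a negative prompt"])
#   prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = embeds
#   image = sd_pipe(image=init_image, prompt_embeds=prompt_embeds,
#                   negative_prompt_embeds=negative_prompt_embeds,
#                   pooled_prompt_embeds=pooled_prompt_embeds,
#                   negative_pooled_prompt_embeds=negative_pooled_prompt_embeds).images[0]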
@slow
@require_torch_gpu
class StableDiffusionPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 361
| 1
|