Dataset schema (each row pairs a `code` sample with a `style_context` sample; the bracketed lines between samples below carry that row's numeric fields):

| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82–53.2k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
"""GIT model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GitConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
[code_codestyle: 262]
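For orientation, a minimal usage sketch for the two config classes above (not part of the dataset row; the override values are illustrative):

```python
from transformers import GitConfig, GitVisionConfig

# Build a vision tower config with one overridden default, then wrap it in a GitConfig;
# GitConfig re-instantiates GitVisionConfig from the dict it receives.
vision = GitVisionConfig(image_size=384)
config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
print(config.vision_config.image_size)  # 384
```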
"""Time Series Transformer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
[style_context_codestyle: 262, label: 1]
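The `feature_size` computed above is the per-step input width fed to the transformer: one lagged copy of each input channel plus the extra features counted by `_number_of_features`. A quick check with the defaults (a sketch; only the class above is assumed):

```python
from transformers import TimeSeriesTransformerConfig

cfg = TimeSeriesTransformerConfig(prediction_length=24)
# Defaults: input_size=1, 7 lags, no dynamic/time/static features,
# embedding_dimension sums to 0, and loc/scale add input_size * 2 = 2:
# feature_size = 1 * 7 + 2 = 9
print(cfg.feature_size)  # 9
```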
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class snake_case ( lowercase_):
def a_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def a_ ( self : int ) -> Any:
'''simple docstring'''
_A = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(a__ )
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = self._create_example_records()
_A = Dataset.from_list(a__ )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(a__ ):
self.assertDictEqual(a__ , example_records[i] )
def a_ ( self : Optional[int] ) -> int:
'''simple docstring'''
_A = self._create_example_records()
_A = Dataset.from_list(a__ )
_A = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def a_ ( self : Optional[Any] ) -> Any: # checks what happens with missing columns
'''simple docstring'''
_A = [{"col_1": 1}, {"col_2": "x"}]
_A = Dataset.from_list(a__ )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def a_ ( self : Union[str, Any] ) -> Any: # checks if the type can be inferred from the second record
'''simple docstring'''
_A = [{"col_1": []}, {"col_1": [1, 2]}]
_A = Dataset.from_list(a__ )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def a_ ( self : str ) -> Tuple:
'''simple docstring'''
_A = Dataset.from_list([] )
self.assertEqual(len(a__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
[code_codestyle: 720]
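The behavior those tests pin down, in two lines (a quick illustration, not from the row): the first record fixes the column set, and missing values become `None`.

```python
from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(dset.column_names)  # ['col_1']
print(dset[1])            # {'col_1': None}
```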
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a__ ( __lowercase ) -> List[Any]:
_A = os.path.join(args.tf_model_dir , "parameters.json" )
_A = json.loads(open(__lowercase ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_A = args.output + ".pt"
_A = OrderedDict()
with tf.device("/CPU:0" ):
_A = tf.train.load_checkpoint(args.tf_model_dir )
_A = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_A = reader.get_tensor(__lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_A = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_A = 8
_A = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/moe" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/softmlp/kernel" ):
_A = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_A = key_name[-9:-7]
for i in range(16 ):
_A = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_A = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/mlp" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p1/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/kernel" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.endswith("/p2/bias" ):
_A = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/ln" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.feed_forward.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.feed_forward.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/att" ):
_A = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_A = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_A = state[:, 0, :, :]
_A = state[:, 1, :, :]
_A = state[:, 2, :, :]
_A = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_A = torch.tensor(__lowercase )
_A = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_A = torch.tensor(__lowercase )
elif key_name.endswith("/o/kernel" ):
_A = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_A = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/an" ):
_A = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_A = "model.blocks.%d.self_attn.norm.bias" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif key_name.endswith("/g" ):
_A = "model.blocks.%d.self_attn.norm.weight" % player
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_A = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_A = "model.%s.weight" % nlayer
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
if key_name.startswith("model/wte" ):
_A = "lm_head.weight"
_A = vnp.copy() # same in embedded
_A = torch.tensor(__lowercase )
elif key_name.startswith("model/wob" ):
_A = "final_logits_bias"
_A = vnp.copy() # same in embedded
_A = state.reshape((1, -1) )
_A = torch.tensor(__lowercase )
elif key_name == "model/dense/kernel":
_A = "model.last_project.weight"
_A = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_A = torch.tensor(__lowercase )
elif key_name == "model/dense_1/bias":
_A = "model.last_project.bias"
_A = vnp.copy() # same because it is one dimensional
_A = torch.tensor(__lowercase )
torch.save(__lowercase , args.output )
if __name__ == "__main__":
a_ = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
a_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
[style_context_codestyle: 621, label: 0]
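The converter transposes every 2-D kernel because TensorFlow dense kernels are stored `[in_features, out_features]` while `torch.nn.Linear` weights are `[out_features, in_features]`. A minimal check of that convention (an illustration, not part of the converter):

```python
import numpy as np
import torch

tf_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)      # TF layout: [in, out]
pt_weight = torch.tensor(tf_kernel.transpose([1, 0]).copy())  # nn.Linear layout: [out, in]
assert pt_weight.shape == (3, 2)
```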
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the (row, column) pair for `letter` in the Polybius square, 1-based."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the given 1-based (row, column) of the Polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # the 25-letter square merges j into i
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
[code_codestyle: 652]
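A round-trip sketch for the cipher above (spaces are dropped and "j" collapses to "i", so the decoded text is "helloworld"):

```python
cipher = PolybiusCipher()
secret = cipher.encode("hello world")
print(secret)                 # ciphertext over the same 25-letter alphabet
print(cipher.decode(secret))  # "helloworld"
```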
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
[style_context_codestyle: 688, label: 0]
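A usage sketch for the pipeline above. The checkpoint name is an assumption (it is the public LDM 4x super-resolution checkpoint this pipeline is usually paired with); the file paths are placeholders.

```python
from diffusers import LDMSuperResolutionPipeline
from PIL import Image

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("low_res.png").convert("RGB")          # placeholder input image
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")
```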
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
[code_codestyle: 185]
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging


logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
[style_context_codestyle: 185, label: 1]
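A sketch of what the detection above keys on: SageMaker injects these environment variables, so setting them by hand (the values here are illustrative) flips the two JSON checks; the `smdistributed` package must also be importable before the function returns True.

```python
import os

os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
# With both set, is_sagemaker_model_parallel_available() only remains False
# when importlib.util.find_spec("smdistributed") is None.
```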
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a ={
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
[code_codestyle: 530]
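What `_LazyModule` buys these init files: attribute access, not `import transformers`, triggers the heavy submodule import. A simplified sketch of the idea (not the real implementation):

```python
import importlib


class LazyNamespace:
    def __init__(self, name, import_structure):
        self._name = name
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # imported only on first access, then resolved from the submodule
        module = importlib.import_module(f"{self._name}.{self._attr_to_module[attr]}")
        return getattr(module, attr)
```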
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 547, label: 0]
"""Convert OpenAI GPT-2 checkpoint."""
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
[code_codestyle: 721]
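A sketch of a direct (non-CLI) invocation of the converter above; all three paths are placeholders, and the dump folder must already exist:

```python
convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="/tmp/gpt2/model.ckpt",   # TensorFlow checkpoint prefix
    gpt2_config_file="",                           # "" falls back to the default GPT2Config()
    pytorch_dump_folder_path="/tmp/gpt2-pytorch",  # receives pytorch_model.bin and config.json
)
```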
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
[style_context_codestyle: 136, label: 0]
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose number of contained rectangles is closest to `target`.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
[code_codestyle: 501]
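The estimate in the loop above comes from a short derivation: an a×b grid contains T(a)·T(b) rectangles, where T(n) = n(n+1)/2 is the n-th triangle number, so setting T(a)·T(b) = target and solving b² + b − 2·target/T(a) = 0 gives b = (−1 + √(1 + 8·target/T(a))) / 2. A worked check (an illustration, not part of the solution file):

```python
from math import sqrt


def triangle(n: int) -> int:
    return n * (n + 1) // 2


target, a = 2_000_000, 36
b_estimate = (-1 + sqrt(1 + 8 * target / triangle(a))) / 2
b = round(b_estimate)  # 77
print(b, triangle(a) * triangle(b))  # 77 1999998, within 2 of the target
```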
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 638, label: 0]
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
[code_codestyle: 588]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 588, label: 1]
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_lowerCamelCase = logging.getLogger(__name__)
_lowerCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_lowerCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
'''simple docstring'''
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case__ )} , )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase : bool = field(
default=snake_case__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase : bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowerCamelCase_ ( self : List[str] ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class a :
'''simple docstring'''
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase : Optional[str] = field(default=snake_case__ , metadata={'help': 'The input training data file (a text file).'} )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
lowerCAmelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
lowerCAmelCase : bool = field(
default=snake_case__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase : Optional[int] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
lowerCAmelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
lowerCAmelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCAmelCase : float = field(
default=0.1_5 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
lowerCAmelCase : bool = field(
default=snake_case__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def lowerCamelCase_ ( self : Optional[int] ):
if self.train_file is not None:
UpperCAmelCase_ = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
UpperCAmelCase_ = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ) -> List[str]:
with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ = [json.loads(__UpperCamelCase ) for line in f.read().splitlines() if (len(__UpperCamelCase ) > 0 and not line.isspace())]
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
UpperCAmelCase_ = {c: dataset[c] for c in dataset.column_names}
UpperCAmelCase_ = refs
return Dataset.from_dict(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCAmelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __UpperCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
UpperCAmelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'train[:{data_args.validation_split_percentage}%]' , )
UpperCAmelCase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'train[{data_args.validation_split_percentage}%:]' , )
else:
UpperCAmelCase_ = {}
if data_args.train_file is not None:
UpperCAmelCase_ = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase_ = data_args.validation_file
UpperCAmelCase_ = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
UpperCAmelCase_ = 'text'
UpperCAmelCase_ = load_dataset(__UpperCamelCase , data_files=__UpperCamelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.config_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCAmelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
UpperCAmelCase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
UpperCAmelCase_ = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__UpperCamelCase )
elif model_args.model_name_or_path:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__UpperCamelCase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
UpperCAmelCase_ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
UpperCAmelCase_ = AutoModelForMaskedLM.from_config(__UpperCamelCase )
model.resize_token_embeddings(len(__UpperCamelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
UpperCAmelCase_ = datasets['train'].column_names
else:
UpperCAmelCase_ = datasets['validation'].column_names
UpperCAmelCase_ = 'text' if 'text' in column_names else column_names[0]
UpperCAmelCase_ = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(__UpperCamelCase : Optional[int] ):
# Remove empty lines
UpperCAmelCase_ = [line for line in examples['text'] if len(__UpperCamelCase ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=data_args.max_seq_length )
UpperCAmelCase_ = datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
UpperCAmelCase_ = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
UpperCAmelCase_ = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
UpperCAmelCase_ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
UpperCAmelCase_ = False
# Data collator
# This one will take care of randomly masking the tokens.
UpperCAmelCase_ = DataCollatorForWholeWordMask(tokenizer=__UpperCamelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCAmelCase_ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
UpperCAmelCase_ = model_args.model_name_or_path
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase_ = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
UpperCAmelCase_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ = trainer.evaluate()
UpperCAmelCase_ = math.exp(eval_output['''eval_loss'''] )
UpperCAmelCase_ = perplexity
UpperCAmelCase_ = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
return results
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
main()
if __name__ == "__main__":
main()
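# A minimal, self-contained sketch of the whole-word masking idea behind the
# DataCollatorForWholeWordMask used above (illustrative only: the function name and the
# 20% masking probability are assumptions, not the collator's internals). WordPiece
# continuation pieces start with "##", so a word and all of its sub-tokens are masked
# together rather than piece by piece.
import random
def whole_word_mask(tokens , mlm_probability=0.2 , mask_token='[MASK]' ):
    # group sub-token indices into words: a "##" token continues the previous word
    words = []
    for i, tok in enumerate(tokens ):
        if tok.startswith('##' ) and words:
            words[-1].append(i )
        else:
            words.append([i] )
    masked = list(tokens )
    for word in words:
        if random.random() < mlm_probability:
            for i in word:  # mask every piece of the chosen word
                masked[i] = mask_token
    return masked
# e.g. "un", "##believ", "##able" are always masked (or kept) as one unit
print(whole_word_mask(['the', 'un', '##believ', '##able', 'story'] ) )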
| 144
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class a ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase : Tuple = """sew"""
def __init__( self , lowerCamelCase_=3_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_=2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_="group" , lowerCamelCase_="gelu" , lowerCamelCase_=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase_=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase_=False , lowerCamelCase_=1_2_8 , lowerCamelCase_=1_6 , lowerCamelCase_=True , lowerCamelCase_=0.05 , lowerCamelCase_=1_0 , lowerCamelCase_=2 , lowerCamelCase_=0.0 , lowerCamelCase_=1_0 , lowerCamelCase_=0 , lowerCamelCase_="mean" , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2_5_6 , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=2 , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
_a : Optional[int] = hidden_size
_a : int = feat_extract_norm
_a : Optional[int] = feat_extract_activation
_a : str = list(lowerCamelCase_ )
_a : Union[str, Any] = list(lowerCamelCase_ )
_a : List[Any] = list(lowerCamelCase_ )
_a : Union[str, Any] = conv_bias
_a : Optional[int] = num_conv_pos_embeddings
_a : Dict = num_conv_pos_embedding_groups
_a : str = len(self.conv_dim )
_a : Any = num_hidden_layers
_a : List[Any] = intermediate_size
_a : Tuple = squeeze_factor
_a : Tuple = hidden_act
_a : Any = num_attention_heads
_a : Optional[int] = hidden_dropout
_a : List[str] = attention_dropout
_a : Optional[Any] = activation_dropout
_a : str = feat_proj_dropout
_a : str = final_dropout
_a : str = layerdrop
_a : Optional[Any] = layer_norm_eps
_a : Optional[Any] = initializer_range
_a : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a : str = apply_spec_augment
_a : List[Any] = mask_time_prob
_a : Optional[Any] = mask_time_length
_a : Union[str, Any] = mask_time_min_masks
_a : List[str] = mask_feature_prob
_a : List[str] = mask_feature_length
_a : str = mask_feature_min_masks
# ctc loss
_a : Any = ctc_loss_reduction
_a : Optional[Any] = ctc_zero_infinity
# sequence classification
_a : List[Any] = use_weighted_layer_sum
_a : Tuple = classifier_proj_size
@property
def __UpperCamelCase ( self ) -> Optional[int]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
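# The property above multiplies the convolutional strides to get the feature extractor's
# overall downsampling ratio. For the default strides this is
# 5 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 = 320, i.e. one output frame per
# 320 input samples (20 ms of audio at 16 kHz). A quick standalone check:
import functools
import operator
assert functools.reduce(operator.mul , (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , 1 ) == 320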
| 120
| 0
|
from __future__ import annotations
END = "#"
class Trie:
    def __init__( self ):
        '''simple docstring'''
        self._trie = {}
    def insert_word( self , text ):
        '''simple docstring'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word( self , prefix ):
        '''simple docstring'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )
    def _elements( self , d ):
        '''simple docstring'''
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie(string ):
    '''simple docstring'''
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )
def main():
    '''simple docstring'''
    print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
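# Expected behaviour of the demo above (assuming CPython's insertion-ordered dicts):
# the prefix "de" matches four of the inserted words ("daring" and "dog" do not), and
# _elements appends a space for each END marker it reaches, so:
# >>> autocomplete_using_trie('de')
# ('depart ', 'detergent ', 'deer ', 'deal ')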
| 709
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 622
| 0
|
from math import pow, sqrt
def validate( *values: float ) -> bool:
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio( molar_mass_1: float , molar_mass_2: float ) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(molar_mass_1 , molar_mass_2 )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
    )
def first_effusion_rate( effusion_rate: float , molar_mass_1: float , molar_mass_2: float ) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_effusion_rate( effusion_rate: float , molar_mass_1: float , molar_mass_2: float ) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1 ) , 6 )
        if validate(effusion_rate , molar_mass_1 , molar_mass_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def first_molar_mass( molar_mass: float , effusion_rate_1: float , effusion_rate_2: float ) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2 , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
def second_molar_mass( molar_mass: float , effusion_rate_1: float , effusion_rate_2: float ) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2 , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_1 , effusion_rate_2 )
        else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
    )
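# A quick sanity check for effusion_ratio above. By Graham's law a gas with one
# sixteenth the molar mass effuses four times as fast, e.g. hydrogen (~2 g/mol)
# versus oxygen (~32 g/mol). Note the quirk that invalid input is returned as a
# ValueError instance rather than raised:
assert effusion_ratio(2.0 , 32.0 ) == 4.0  # sqrt(32.0 / 2.0) == 4.0
assert isinstance(effusion_ratio(-1.0 , 32.0 ) , ValueError )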
| 89
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : int = 3_2 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_5_5 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCamelCase : bool = True , __lowerCamelCase : str=7 , __lowerCamelCase : Union[str, Any]=3_0 , __lowerCamelCase : Tuple=4_0_0 , __lowerCamelCase : List[Any]=3 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_8_8}
_SCREAMING_SNAKE_CASE = size_divisor
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = do_center_crop
_SCREAMING_SNAKE_CASE = image_mean
_SCREAMING_SNAKE_CASE = image_std
_SCREAMING_SNAKE_CASE = do_pad
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = min_resolution
_SCREAMING_SNAKE_CASE = max_resolution
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : int=False ):
"""simple docstring"""
if not batched:
_SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
_SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = image.size
else:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
_SCREAMING_SNAKE_CASE = size / min(__lowerCamelCase , __lowerCamelCase )
if h < w:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = size, scale * w
else:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = scale * h, size
_SCREAMING_SNAKE_CASE = int((1_3_3_3 / 8_0_0) * size )
if max(__lowerCamelCase , __lowerCamelCase ) > max_size:
_SCREAMING_SNAKE_CASE = max_size / max(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = newh * scale
_SCREAMING_SNAKE_CASE = neww * scale
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = int(newh + 0.5 ), int(neww + 0.5 )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
_SCREAMING_SNAKE_CASE = []
for image in image_inputs:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_SCREAMING_SNAKE_CASE = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
_SCREAMING_SNAKE_CASE = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
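# A worked example of the expected-size computation above for a single 400 x 600
# (h x w) image with shortest_edge = 288 and size_divisor = 32:
#   scale = 288 / 400 = 0.72            -> (newh, neww) = (288, 432)
#   max_size = int(1333 / 800 * 288) = 479; max(288, 432) <= 479, so no second rescale
#   snap to the divisor: (288 // 32 * 32, 432 // 32 * 32) = (288, 416)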
@require_torch
@require_vision
class lowercase_ ( A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
# Initialize image processor
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
# Initialize image processor
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
# Initialize image processor
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 418
| 0
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger('''transformers.models.speecht5''')
def lowercase__ ( lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
hf_model.apply_weight_norm()
UpperCAmelCase = checkpoint['input_conv.weight_g']
UpperCAmelCase = checkpoint['input_conv.weight_v']
UpperCAmelCase = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase = checkpoint[F"upsamples.{i}.1.weight_g"]
UpperCAmelCase = checkpoint[F"upsamples.{i}.1.weight_v"]
UpperCAmelCase = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
UpperCAmelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
UpperCAmelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
UpperCAmelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
UpperCAmelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
UpperCAmelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
UpperCAmelCase = checkpoint['output_conv.1.weight_g']
UpperCAmelCase = checkpoint['output_conv.1.weight_v']
UpperCAmelCase = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__ ( lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , ) -> Optional[Any]:
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(lowerCAmelCase )
else:
UpperCAmelCase = SpeechTaHifiGanConfig()
UpperCAmelCase = SpeechTaHifiGan(lowerCAmelCase )
UpperCAmelCase = torch.load(lowerCAmelCase )
load_weights(orig_checkpoint['model']['generator'] , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = np.load(lowerCAmelCase )
UpperCAmelCase = stats[0].reshape(-1 )
UpperCAmelCase = stats[1].reshape(-1 )
UpperCAmelCase = torch.from_numpy(lowerCAmelCase ).float()
UpperCAmelCase = torch.from_numpy(lowerCAmelCase ).float()
model.save_pretrained(lowerCAmelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
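# Context for the weight_g / weight_v keys copied above: PyTorch weight normalization
# reparametrizes a weight as w = g * v / ||v||, storing the magnitude as `weight_g` and
# the direction as `weight_v`. apply_weight_norm() creates those parameters so the
# original checkpoint keys can be copied in, and remove_weight_norm() folds them back
# into a plain `weight`. A minimal sketch (the layer dimensions are illustrative):
from torch import nn
conv = nn.utils.weight_norm(nn.Conv1d(80 , 512 , kernel_size=7 ) )
assert hasattr(conv , 'weight_g' ) and hasattr(conv , 'weight_v' )
nn.utils.remove_weight_norm(conv )  # folds g and v back into conv.weight
assert not hasattr(conv , 'weight_g' )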
| 708
|
"""simple docstring"""
import sys
import turtle
def get_mid(pa : tuple[float, float] , pb : tuple[float, float] ) -> tuple[float, float]:
    """simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertex1 : tuple[float, float] , vertex2 : tuple[float, float] , vertex3 : tuple[float, float] , depth : int , ) -> None:
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertex1[0] , vertex1[1] )
    my_pen.down()
    my_pen.goto(vertex2[0] , vertex2[1] )
    my_pen.goto(vertex3[0] , vertex3[1] )
    my_pen.goto(vertex1[0] , vertex1[1] )
    if depth == 0:
        return
    triangle(vertex1 , get_mid(vertex1 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
    triangle(vertex2 , get_mid(vertex1 , vertex2 ) , get_mid(vertex2 , vertex3 ) , depth - 1 )
    triangle(vertex3 , get_mid(vertex3 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 183
| 0
|
"""simple docstring"""
from collections import deque
class a :
def __init__( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = process_name # process name
lowerCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowerCAmelCase = arrival_time
lowerCAmelCase = burst_time # remaining burst time
lowerCAmelCase = 0 # total time of the process wait in ready queue
lowerCAmelCase = 0 # time from arrival time to completion time
class a :
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = number_of_queues
# time slices of the queues to which the round robin algorithm is applied
lowerCAmelCase = time_slices
# unfinished process is in this ready_queue
lowerCAmelCase = queue
# current time
lowerCAmelCase = current_time
# finished process is in this sequence queue
lowerCAmelCase = deque()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for i in range(len(_snake_case ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for i in range(len(_snake_case ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = []
for i in range(len(_snake_case ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return [q.burst_time for q in queue]
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = deque() # sequence deque of finished process
while len(_snake_case ) != 0:
lowerCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_snake_case )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowerCAmelCase = 0
# set the process's turnaround time because it is finished
lowerCAmelCase = self.current_time - cp.arrival_time
# set the completion time
lowerCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_snake_case )
self.finish_queue.extend(_snake_case ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = deque() # sequence deque of terminated process
# run for just one cycle; unfinished processes go back into the queue
for _ in range(len(_snake_case ) ):
lowerCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_snake_case )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCAmelCase = self.current_time
# put the process at the back of the queue because it is not finished
ready_queue.append(_snake_case )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCAmelCase = 0
# set the finish time
lowerCAmelCase = self.current_time
# update the process' turnaround time because it is finished
lowerCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_snake_case )
self.finish_queue.extend(_snake_case ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCamelCase__ ( self ):
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
lowerCAmelCase ,lowerCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__UpperCamelCase : Tuple = Process('''P1''', 0, 53)
__UpperCamelCase : List[Any] = Process('''P2''', 0, 17)
__UpperCamelCase : Tuple = Process('''P3''', 0, 68)
__UpperCamelCase : List[Any] = Process('''P4''', 0, 24)
__UpperCamelCase : Union[str, Any] = 3
__UpperCamelCase : str = [17, 25]
__UpperCamelCase : str = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
__UpperCamelCase : Union[str, Any] = Process('''P1''', 0, 53)
__UpperCamelCase : Any = Process('''P2''', 0, 17)
__UpperCamelCase : Optional[Any] = Process('''P3''', 0, 68)
__UpperCamelCase : List[Any] = Process('''P4''', 0, 24)
__UpperCamelCase : List[Any] = 3
__UpperCamelCase : Any = [17, 25]
__UpperCamelCase : int = deque([Pa, Pa, Pa, Pa])
__UpperCamelCase : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
__UpperCamelCase : Optional[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
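# A worked pass of the first round-robin queue above (time slice 17) on the demo
# bursts, with all four processes arriving at t = 0: each process runs for at most 17
# time units, so after one pass the remaining bursts are
#   P1: 53 - 17 = 36, P2: 17 - 17 = 0 (finished), P3: 68 - 17 = 51, P4: 24 - 17 = 7
# and the clock has advanced by 4 * 17 = 68 units. P2 then never reaches the second
# queue, while the others continue with time slice 25 and finally FCFS.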
| 4
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float] ):
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float] ):
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
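    # A couple of worked values for the functions above: resistors of 2 and 4 ohms give
    # 1 / (1/2 + 1/4) = 4/3 ohms in parallel and 2 + 4 = 6 ohms in series.
    assert abs(resistor_parallel([2.0, 4.0] ) - 4.0 / 3.0 ) < 1e-12
    assert resistor_series([2.0, 4.0] ) == 6.0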
| 4
| 1
|
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
lowerCAmelCase_ = "\nimport os\n"
lowerCAmelCase_ = "\ndef foo():\n import os\n return False\n"
lowerCAmelCase_ = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
lowerCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
lowerCAmelCase_ = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
lowerCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
lowerCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
lowerCAmelCase_ = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
lowerCAmelCase_ = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
lowerCAmelCase_ = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
lowerCAmelCase_ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , CASES )
def test_import_parsing(tmp_path , case ):
    '''simple docstring'''
    tmp_file_path = os.path.join(tmp_path , "test_file.py" )
    with open(tmp_file_path , "w" ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 711
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence : list[Any] ):
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence : list[Any] , current_subsequence : list[Any] , index : int ):
    '''simple docstring'''
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
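# The backtracking above always explores the "skip this element" branch first, so for
# a small input the subsequences are printed in this order:
# >>> generate_all_subsequences([1, 2])
# []
# [2]
# [1]
# [1, 2]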
| 426
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
with open(__A ) as metadata_file:
lowercase__ = json.load(__A )
lowercase__ = LukeConfig(use_entity_aware_attention=__A , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
lowercase__ = torch.load(__A , map_location='cpu' )['module']
# Load the entity vocab file
lowercase__ = load_original_entity_vocab(__A )
# add an entry for [MASK2]
lowercase__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowercase__ = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
lowercase__ = AddedToken('<ent>' , lstrip=__A , rstrip=__A )
lowercase__ = AddedToken('<ent2>' , lstrip=__A , rstrip=__A )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__A )
with open(os.path.join(__A , 'tokenizer_config.json' ) , 'r' ) as f:
lowercase__ = json.load(__A )
lowercase__ = 'MLukeTokenizer'
with open(os.path.join(__A , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(__A , __A )
with open(os.path.join(__A , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(__A , __A )
lowercase__ = MLukeTokenizer.from_pretrained(__A )
# Initialize the embeddings of the special tokens
lowercase__ = tokenizer.convert_tokens_to_ids(['@'] )[0]
lowercase__ = tokenizer.convert_tokens_to_ids(['#'] )[0]
lowercase__ = state_dict['embeddings.word_embeddings.weight']
lowercase__ = word_emb[ent_init_index].unsqueeze(0 )
lowercase__ = word_emb[enta_init_index].unsqueeze(0 )
lowercase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowercase__ = state_dict[bias_name]
lowercase__ = decoder_bias[ent_init_index].unsqueeze(0 )
lowercase__ = decoder_bias[enta_init_index].unsqueeze(0 )
lowercase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowercase__ = F'''encoder.layer.{layer_index}.attention.self.'''
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
lowercase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowercase__ = state_dict['entity_embeddings.entity_embeddings.weight']
lowercase__ = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
lowercase__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowercase__ = state_dict['entity_predictions.bias']
lowercase__ = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
lowercase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowercase__ = LukeForMaskedLM(config=__A ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
lowercase__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
lowercase__ = state_dict[key]
else:
lowercase__ = state_dict[key]
lowercase__, lowercase__ = model.load_state_dict(__A , strict=__A )
if set(__A ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__A ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowercase__ = MLukeTokenizer.from_pretrained(__A , task='entity_classification' )
lowercase__ = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
lowercase__ = (0, 9)
lowercase__ = tokenizer(__A , entity_spans=[span] , return_tensors='pt' )
lowercase__ = model(**__A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase__ = torch.Size((1, 33, 768) )
lowercase__ = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowercase__ = torch.Size((1, 1, 768) )
lowercase__ = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
lowercase__ = MLukeTokenizer.from_pretrained(__A )
lowercase__ = 'Tokyo is the capital of <mask>.'
lowercase__ = (24, 30)
lowercase__ = tokenizer(__A , entity_spans=[span] , return_tensors='pt' )
lowercase__ = model(**__A )
lowercase__ = encoding['input_ids'][0].tolist()
lowercase__ = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
lowercase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__A )
lowercase__ = outputs.entity_logits[0][0].argmax().item()
lowercase__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(__A ) )
model.save_pretrained(__A )
def load_original_entity_vocab(entity_vocab_path ):
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[F'''{language}:{entity_name}'''] = entity_id
    return new_mapping
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
a__ : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
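# The embedding surgery above follows a common pattern for adding special tokens to a
# pretrained checkpoint: grow the embedding matrix with rows copied from existing
# entries instead of random initialization. A generic sketch (shapes illustrative):
import torch
emb = torch.randn(30_000 , 768 )            # pretrained word embeddings
new_rows = emb[[3, 7]]                      # initialize the new tokens from chosen rows
emb = torch.cat([emb, new_rows] , dim=0 )   # vocab grows by two, matching vocab_size += 2
assert emb.shape == (30_002, 768)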
| 622
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
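# A minimal sketch of the deferred-import idea behind _LazyModule above, using
# module-level __getattr__ (PEP 562) instead of the transformers helper; the mapping
# below is illustrative. Nothing heavy is imported until an attribute is first accessed:
import importlib
_LAZY = {'PegasusXModel': '.modeling_pegasus_x'}  # attribute -> submodule
def __getattr__(name ):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name] , __name__ )
        return getattr(module , name )
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}' )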
| 486
| 0
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _UpperCAmelCase ( unittest.TestCase , _A ):
"""simple docstring"""
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = load_tool("text-to-speech" )
self.tool.setup()
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :int = self.tool("hey" )
lowerCAmelCase__ :Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Dict = self.tool("hey" )
lowerCAmelCase__ :str = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 712
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _UpperCAmelCase ( _A , unittest.TestCase ):
"""simple docstring"""
A = ShapEImgaImgPipeline
A = ['''image''']
A = ['''image''']
A = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
A = False
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
'''simple docstring'''
return 8
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowerCAmelCase__ :int = CLIPVisionModel(_lowerCAmelCase )
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = CLIPImageProcessor(
crop_size=224 , do_center_crop=_lowerCAmelCase , do_normalize=_lowerCAmelCase , do_resize=_lowerCAmelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Any = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
lowerCAmelCase__ :int = PriorTransformer(**_lowerCAmelCase )
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :List[Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase__ :str = ShapERenderer(**_lowerCAmelCase )
return model
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.dummy_prior
lowerCAmelCase__ :str = self.dummy_image_encoder
lowerCAmelCase__ :Optional[Any] = self.dummy_image_processor
lowerCAmelCase__ :Union[str, Any] = self.dummy_renderer
lowerCAmelCase__ :str = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1_024 , prediction_type="sample" , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
lowerCAmelCase__ :Any = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
if str(_lowerCAmelCase ).startswith("mps" ):
lowerCAmelCase__ :Dict = torch.manual_seed(_lowerCAmelCase )
else:
lowerCAmelCase__ :str = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowerCAmelCase__ :Tuple = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = "cpu"
lowerCAmelCase__ :Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ :Union[str, Any] = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :int = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowerCAmelCase__ :List[Any] = output.images[0]
lowerCAmelCase__ :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCAmelCase__ :str = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self ):
'''simple docstring'''
# NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = torch_device == "cpu"
lowerCAmelCase__ :Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.get_dummy_components()
lowerCAmelCase__ :Tuple = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ :Optional[Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :Tuple = 1
lowerCAmelCase__ :List[Any] = 2
lowerCAmelCase__ :List[str] = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase__ :Any = batch_size * [inputs[key]]
lowerCAmelCase__ :Optional[Any] = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
lowerCAmelCase__ :int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
lowerCAmelCase__ :Optional[int] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
lowerCAmelCase__ :Tuple = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :Any = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
lowerCAmelCase__ :List[Any] = pipe(
_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 111
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCamelCase__ ( unittest.TestCase ):
a__ : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
a__ : Union[str, Any] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __lowercase( self : Optional[Any], __lowerCamelCase : List[Any], __lowerCamelCase : List[Any], __lowerCamelCase : Optional[Any] ) -> Tuple:
UpperCamelCase__ : Any = AudioClassificationPipeline(model=__lowerCamelCase, feature_extractor=__lowerCamelCase )
# test with a raw waveform
UpperCamelCase__ : Tuple = np.zeros((3_40_00,) )
UpperCamelCase__ : str = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def __lowercase( self : Any, __lowerCamelCase : str, __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = examples
UpperCamelCase__ : List[Any] = audio_classifier(__lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
__lowerCamelCase, [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
], )
UpperCamelCase__ : Any = audio_classifier(__lowerCamelCase, top_k=1 )
self.assertEqual(
__lowerCamelCase, [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
], )
self.run_torchaudio(__lowerCamelCase )
@require_torchaudio
def __lowercase( self : Optional[int], __lowerCamelCase : List[str] ) -> Tuple:
import datasets
# test with a local file
UpperCamelCase__ : List[str] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
UpperCamelCase__ : Dict = dataset[0]['''audio''']['''array''']
UpperCamelCase__ : List[str] = audio_classifier(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase, [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
], )
@require_torch
def __lowercase( self : int ) -> Optional[int]:
UpperCamelCase__ : str = '''anton-l/wav2vec2-random-tiny-classifier'''
UpperCamelCase__ : int = pipeline('''audio-classification''', model=__lowerCamelCase )
UpperCamelCase__ : Any = np.ones((80_00,) )
UpperCamelCase__ : Union[str, Any] = audio_classifier(__lowerCamelCase, top_k=4 )
UpperCamelCase__ : Dict = [
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
UpperCamelCase__ : str = [
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__lowerCamelCase, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
UpperCamelCase__ : Dict = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
UpperCamelCase__ : str = audio_classifier(__lowerCamelCase, top_k=4 )
self.assertIn(nested_simplify(__lowerCamelCase, decimals=4 ), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __lowercase( self : Union[str, Any] ) -> List[Any]:
import datasets
UpperCamelCase__ : Union[str, Any] = '''superb/wav2vec2-base-superb-ks'''
UpperCamelCase__ : Any = pipeline('''audio-classification''', model=__lowerCamelCase )
UpperCamelCase__ : List[Any] = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''' )
UpperCamelCase__ : Union[str, Any] = np.array(dataset[3]['''speech'''], dtype=np.floataa )
UpperCamelCase__ : Tuple = audio_classifier(__lowerCamelCase, top_k=4 )
self.assertEqual(
nested_simplify(__lowerCamelCase, decimals=3 ), [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
], )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def __lowercase( self : List[str] ) -> Union[str, Any]:
pass
| 344
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class UpperCamelCase__ ( __lowerCamelCase ):
a__ : Union[List[PIL.Image.Image], np.ndarray]
a__ : Optional[List[bool]]
a__ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 344
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_lowerCamelCase = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key( key ):
    """simple docstring"""
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight" , ".conv1d_2.weight" )
    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
    if "prime_prior" in key:
        key = key.replace("prime_prior" , "encoder" )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
__SCREAMING_SNAKE_CASE : Any = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
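# Illustrative sketch, not part of the original script: two of the renaming
# rules above applied to hypothetical checkpoint keys, assuming the mapping
# behaves as written (the codebook rename takes precedence over layer-norm).
def _demo_replace(key: str) -> str:
    if key.endswith("k"):  # vqvae.X.k -> vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    return key

assert _demo_replace("vqvae.level_blocks.0.k") == "vqvae.level_blocks.0.codebook"
assert _demo_replace("prior.transformer.0.ln_0.weight") == "prior.transformer.0.layer_norm_0.weight"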
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    """simple docstring"""
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(F"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
        elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[F"""{key_prefix}.{key}"""]
            print(F"""{original_key} -> {key} : \nshape {val.shape} and {value.shape} do not match""" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
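# Illustrative sketch, not part of the original script: how the encoder
# regexes above derive a flat block index from two captured digits
# (block_index = outer * 2 + inner), using a hypothetical key.
import re

_demo_pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
_demo_groups = _demo_pattern.fullmatch("encoders.0.level_blocks.1.model.2.1.weight").groups()
assert int(_demo_groups[2]) * 2 + int(_demo_groups[3]) == 5  # lands in downsample_block.5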
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    """simple docstring"""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
            r = requests.get(F"""{PREFIX}{file}""" , allow_redirects=True )
            os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=True )
            open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , "wb" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("/" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b" ):
                new_dic[k.replace("b" , "bias" )] = old_dic[k]
            elif k.endswith(".w" ):
                new_dic[k.replace("w" , "weight" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks." , ".model." )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = "vqvae" if i == 0 else F"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
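# Example invocation, not part of the original script (the values shown are
# simply the defaults declared above, assuming the file is saved as
# convert_jukebox.py):
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted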
| 447
| 0
|
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_base_extractors( compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    input_paths_and_base_extractors = {
        '7z': (seven_zip_file, SevenZipExtractor),
        'bz2': (bza_file, BzipaExtractor),
        'gzip': (gz_file, GzipExtractor),
        'lz4': (lza_file, LzaExtractor),
        'tar': (tar_file, TarExtractor),
        'xz': (xz_file, XzExtractor),
        'zip': (zip_file, ZipExtractor),
        'zstd': (zstd_file, ZstdExtractor),
    }
    input_path , base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = F"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    base_extractor.extract(input_path , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8' )
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8' )
    expected_file_content = text_file.read_text(encoding='utf-8' )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_extractor( compression_format , is_archive , bza_file , gz_file , lza_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    input_paths = {
        '7z': seven_zip_file,
        'bz2': bza_file,
        'gzip': gz_file,
        'lz4': lza_file,
        'tar': tar_file,
        'xz': xz_file,
        'zip': zip_file,
        'zstd': zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = F"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ('extracted' if is_archive else 'extracted.txt')
    Extractor.extract(input_path , output_path , extractor_format )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding='utf-8' )
    else:
        extracted_file_content = output_path.read_text(encoding='utf-8' )
    expected_file_content = text_file.read_text(encoding='utf-8' )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot( tmp_path , text_file ):
    import tarfile
    directory = tmp_path / 'data_dot_dot'
    directory.mkdir()
    path = directory / 'tar_file_with_dot_dot.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(text_file , arcname=os.path.join('..' , text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link( tmp_path ):
    import tarfile
    directory = tmp_path / 'data_sym_link'
    directory.mkdir()
    path = directory / 'tar_file_with_sym_link.tar'
    os.symlink('..' , directory / 'subdir' , target_is_directory=True )
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def test_tar_extract_insecure_files( insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog ):
    insecure_tar_files = {
        'tar_file_with_dot_dot': tar_file_with_dot_dot,
        'tar_file_with_sym_link': tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / 'extracted'
    TarExtractor.extract(insecure_tar_file_path , output_path )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
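# Illustrative sketch, not part of the original tests: the core of the
# insecure-member check the extractor is expected to perform, assuming a
# resolve-and-compare strategy on POSIX-style paths. A member whose resolved
# target escapes the destination directory must be rejected.
import os

def _demo_is_safe_member(dest_dir: str, member_name: str) -> bool:
    target = os.path.realpath(os.path.join(dest_dir, member_name))
    return target.startswith(os.path.realpath(dest_dir) + os.sep)

assert not _demo_is_safe_member("/tmp/extract", "../escape.txt")
assert _demo_is_safe_member("/tmp/extract", "inner/file.txt")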
def test_is_zipfile_false_positive( tmpdir ):
    # We should have fewer false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / 'not_a_zip_file'
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
        b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
        b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
        b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
    )
    with not_a_zip_file.open('wb' ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) ) # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file ) # but we're right
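# Illustrative sketch, not part of the original tests: the magic-number check
# alluded to above, assuming the extractor keys on the standard "PK"
# signatures. Reading only the first bytes is enough to reject the PNG
# payload constructed in the test, even though zipfile.is_zipfile scans the
# whole file and is fooled by the embedded "PK\x05\x06" marker.
_DEMO_ZIP_MAGIC_NUMBERS = (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")

def _demo_looks_like_zip(first_bytes: bytes) -> bool:
    return any(first_bytes.startswith(magic) for magic in _DEMO_ZIP_MAGIC_NUMBERS)

assert not _demo_looks_like_zip(b"\x89PNG\r\n\x1a\n")
assert _demo_looks_like_zip(b"PK\x03\x04rest-of-archive")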
| 66
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCAmelCase = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
UpperCAmelCase = {
'''facebook/blenderbot_small-90M''': 512,
}
class A_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
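# Illustrative sketch, not part of the original file: the token-type-id
# method above always returns zeros because Blenderbot Small does not use
# token type ids. A standalone equivalent (the sep/cls ids are hypothetical):
def _demo_token_type_ids(ids_a, ids_b=None, sep=(2,), cls=(0,)):
    sep, cls = list(sep), list(cls)
    if ids_b is None:
        return len(cls + ids_a + sep) * [0]
    return len(cls + ids_a + sep + sep + ids_b + sep) * [0]

assert _demo_token_type_ids([5, 6, 7]) == [0, 0, 0, 0, 0]
assert _demo_token_type_ids([5], [6]) == [0, 0, 0, 0, 0, 0]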
| 84
| 0
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256_047
RO_CODE = 256_145
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = NllbTokenizer
_lowerCamelCase = NllbTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
_lowerCamelCase = {}
def snake_case_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_ ( self ):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
__a = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__a = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def snake_case_ ( self ):
__a = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(__A , **__A )
__a = self.tokenizer_class.from_pretrained(__A , **__A )
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(__A )
__a = tokenizer_p.save_pretrained(__A )
            # Checks it saves the same files plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__a = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(__A , __A )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(__A )
__a = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A , __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=True
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(__A , legacy_format=__A )
__a = tokenizer_p.save_pretrained(__A )
            # Checks it saves the same files
self.assertSequenceEqual(__A , __A )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(__A )
__a = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A , __A ) )
shutil.rmtree(__A )
# Save tokenizer rust, legacy_format=False
__a = tempfile.mkdtemp()
__a = tokenizer_r.save_pretrained(__A , legacy_format=__A )
__a = tokenizer_p.save_pretrained(__A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__a = tokenizer_r.from_pretrained(__A )
__a = tokenizer_p.from_pretrained(__A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__A , __A ) )
shutil.rmtree(__A )
@require_torch
def snake_case_ ( self ):
if not self.test_seqaseq:
return
__a = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
__a = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
__a = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
__a = tokenizer.prepare_seqaseq_batch(
src_texts=__A , tgt_texts=__A , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__a = tokenizer.prepare_seqaseq_batch(
__A , tgt_texts=__A , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__a = tokenizer.prepare_seqaseq_batch(
src_texts=__A , max_length=3 , max_target_length=10 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , __A )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = [AddedToken("""<special>""" , lstrip=__A )]
__a = self.rust_tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , **__A )
__a = tokenizer_r.encode("""Hey this is a <special> token""" )
__a = tokenizer_r.encode("""<special>""" , add_special_tokens=__A )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__a = self.rust_tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , **__A , )
__a = self.tokenizer_class.from_pretrained(
__A , additional_special_tokens=__A , **__A )
__a = tokenizer_p.encode("""Hey this is a <special> token""" )
__a = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = """facebook/nllb-200-distilled-600M"""
_lowerCamelCase = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
_lowerCamelCase = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
_lowerCamelCase = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
def snake_case_ ( cls ):
__a = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
__a = 1
return cls
def snake_case_ ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 256057 )
def snake_case_ ( self ):
__a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __A )
def snake_case_ ( self ):
self.assertIn(__A , self.tokenizer.all_special_ids )
# fmt: off
__a = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
__a = self.tokenizer.decode(__A , skip_special_tokens=__A )
__a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__A )
self.assertEqual(__A , __A )
self.assertNotIn(self.tokenizer.eos_token , __A )
def snake_case_ ( self ):
__a = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , __A )
__a = 10
__a = self.tokenizer(__A , max_length=__A , truncation=__A ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __A )
self.assertEqual(len(__A ) , __A )
def snake_case_ ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [256203, 3] )
def snake_case_ ( self ):
__a = tempfile.mkdtemp()
__a = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__A )
__a = NllbTokenizer.from_pretrained(__A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __A )
@require_torch
def snake_case_ ( self ):
__a = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__A , truncation=__A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__a = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(__A , __A )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__a = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __A )
self.assertEqual(__A , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def snake_case_ ( self ):
__a = self.tokenizer(self.src_text , padding=__A , truncation=__A , max_length=3 , return_tensors="""pt""" )
__a = self.tokenizer(
text_target=self.tgt_text , padding=__A , truncation=__A , max_length=10 , return_tensors="""pt""" )
__a = targets["""input_ids"""]
__a = shift_tokens_right(
__A , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case_ ( self ):
__a = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(__A ) , {
                # eng_Latn, A, test, EOS
                """input_ids""": [[256047, 70, 7356, 2]],
                """attention_mask""": [[1, 1, 1, 1]],
                # fra_Latn
                """forced_bos_token_id""": 256057,
} , )
@require_torch
def snake_case_ ( self ):
__a = True
__a = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
__a = False
__a = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
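# Illustrative note, not part of the original tests: the two assertions above
# capture the convention being tested. With legacy behaviour enabled the
# source language code is appended after </s> (suffix position),
#     tokens ... </s> eng_Latn
# while the default places it first (prefix position),
#     eng_Latn tokens ... </s>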
| 720
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = KandinskyVaaControlnetImgaImgPipeline
_lowerCamelCase = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
_lowerCamelCase = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
    _lowerCamelCase = [
        """generator""",
        """height""",
        """width""",
        """strength""",
        """guidance_scale""",
        """num_inference_steps""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
_lowerCamelCase = False
@property
def snake_case_ ( self ):
return 32
@property
def snake_case_ ( self ):
return 32
@property
def snake_case_ ( self ):
return self.time_input_dim
@property
def snake_case_ ( self ):
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
return 100
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = {
"""in_channels""": 8,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__a = UNetaDConditionModel(**__A )
return model
@property
def snake_case_ ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ):
torch.manual_seed(0 )
__a = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ):
__a = self.dummy_unet
__a = self.dummy_movq
__a = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__a = DDIMScheduler(**__A )
__a = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case_ ( self , __A , __A=0 ):
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__A ) ).to(__A )
__a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__A )
# create init_image
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
__a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a = Image.fromarray(np.uint8(__A ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("""mps""" ):
__a = torch.manual_seed(__A )
else:
__a = torch.Generator(device=__A ).manual_seed(__A )
__a = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ):
__a = """cpu"""
__a = self.get_dummy_components()
__a = self.pipeline_class(**__A )
__a = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__a = pipe(**self.get_dummy_inputs(__A ) )
__a = output.images
__a = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
__a = image[0, -3:, -3:, -1]
__a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
__a = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
__a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__a = init_image.resize((512, 512) )
__a = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
__a = torch.from_numpy(np.array(__A ) ).float() / 255.0
__a = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__a = """A robot, 4k photo"""
__a = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
__a = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
__a = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
__a = torch.Generator(device="""cpu""" ).manual_seed(0 )
__a , __a = pipe_prior(
__A , image=__A , strength=0.85 , generator=__A , negative_prompt="""""" , ).to_tuple()
__a = pipeline(
image=__A , image_embeds=__A , negative_image_embeds=__A , hint=__A , generator=__A , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , )
__a = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__A , __A )
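# Illustrative sketch, not part of the original tests: a minimal equivalent
# of assert_mean_pixel_difference (imported above), assuming it compares two
# images by mean absolute pixel error against a fixed threshold.
import numpy as np

def _demo_mean_pixel_difference(image, expected_image, threshold=10):
    diff = np.abs(image.astype(np.float64) - expected_image.astype(np.float64)).mean()
    assert diff < threshold, f"mean pixel difference {diff} exceeds {threshold}"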
| 209
| 0
|
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict( state_dict ):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embeddings
        if name.startswith('''emb.''' ):
            name = name.replace('''emb.''', '''embeddings.''' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0''' ):
            name = name.replace('''blocks.0.ln0''', '''blocks.0.pre_ln''' )
        # att -> attention
        name = re.sub(r'''blocks\.(\d+)\.att''', r'''blocks.\1.attention''', name )
        # ffn -> feed_forward
        name = re.sub(r'''blocks\.(\d+)\.ffn''', r'''blocks.\1.feed_forward''', name )
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('''.time_mix_k''' ):
            name = name.replace('''.time_mix_k''', '''.time_mix_key''' )
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('''.time_mix_v''' ):
            name = name.replace('''.time_mix_v''', '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('''.time_mix_r''' ):
            name = name.replace('''.time_mix_r''', '''.time_mix_receptance''' )
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
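# Illustrative sketch, not part of the original script: one of the rename
# rules above applied to a hypothetical checkpoint key.
import re

_demo_renamed = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", "blocks.3.att.key.weight")
assert _demo_renamed == "blocks.3.attention.key.weight"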
def convert_rmkv_checkpoint_to_hf_format( repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None ):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
        vocab_size = 50_277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
    if size not in possible_sizes:
        raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file )
    state_dict = torch.load(model_file, map_location='''cpu''' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file, '''w''', encoding='''utf-8''' ) as f:
            content = json.dumps(index, indent=2, sort_keys=True ) + '''\n'''
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may raise an OOM error; if this is the case, don\'t worry: you still have converted the model.''' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name, max_shard_size='''2GB''' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
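# Example invocation, not part of the original script (repo and checkpoint
# names are hypothetical placeholders):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id <hub-repo-with-checkpoint> \
#       --checkpoint_file <checkpoint>.pth --output_dir ./rwkv-converted --size 169M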
| 626
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''', default=__a, help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
), )
if subparsers is not None:
        parser.set_defaults(func=test_command )
return parser
def test_command( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'--config_file={args.config_file} {script_name}'
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def main( ):
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
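# Example invocation, not part of the original file (assuming the
# `accelerate` CLI is installed; --config_file is optional and falls back to
# the default cache location described in the help text above):
#   accelerate test --config_file path/to/default_config.yaml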
| 626
| 1
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field ( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
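# Illustrative sketch, not part of the original tests: a minimal equivalent
# of string_to_bool (imported above), assuming it maps the usual textual
# spellings to booleans; a plain ValueError stands in for argparse's
# ArgumentTypeError here.
def _demo_string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise ValueError(f"Not a boolean value: {v}")

assert _demo_string_to_bool("True") is True and _demo_string_to_bool("false") is False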
@dataclass
class BasicExample :
    '''simple docstring'''
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class lowercase__ :
'''simple docstring'''
    foo: int = 42
    baz: str = field(default='''toto''', metadata={'''help''': '''help message'''} )
@dataclass
class WithDefaultBoolExample :
    '''simple docstring'''
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum ( Enum ):
    '''simple docstring'''
    titi = '''titi'''
    toto = '''toto'''
class MixedTypeEnum ( Enum ):
    '''simple docstring'''
    titi = '''titi'''
    toto = '''toto'''
    fourtytwo = 42
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = "toto"
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = BasicEnum(self.foo )
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = "toto"
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample :
    '''simple docstring'''
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'''help''': '''help message'''} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class lowercase__ :
'''simple docstring'''
    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase__ :
'''simple docstring'''
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__( self ):
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class lowercase__ :
'''simple docstring'''
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='''toto''', metadata={'''help''': '''help message'''} )
    foo_str: "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase__ :
'''simple docstring'''
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
@dataclass
class lowercase__ :
'''simple docstring'''
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'''help''': '''help message'''} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    def argparsersEqual( self , a , b ):
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != 'container'}
            yy = {k: v for k, v in vars(y ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , snake_case ) and yy.get('choices' , snake_case ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](snake_case ) , yy['type'](snake_case ) )
del xx["type"], yy["type"]
self.assertEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> str:
        parser = HfArgumentParser(BasicExample )
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo' , type=int , required=True )
        expected.add_argument('--bar' , type=float , required=True )
        expected.add_argument('--baz' , type=str , required=True )
        expected.add_argument('--flag' , type=string_to_bool , default=False , const=True , nargs='?' )
        self.argparsersEqual(parser , expected )
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        ((example) , ) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
self.assertFalse(example.flag )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=snake_case )
expected.add_argument('--baz' , default='toto' , type=snake_case , help='help message' )
self.argparsersEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case , default=snake_case , const=snake_case , nargs='?' )
expected.add_argument('--baz' , type=snake_case , default=snake_case , const=snake_case , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=snake_case , dest='baz' )
expected.add_argument('--opt' , type=snake_case , default=snake_case )
_UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case )
for dataclass_type in dataclass_types:
_UpperCAmelCase = HfArgumentParser(snake_case )
self.argparsersEqual(snake_case , snake_case )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) )
_UpperCAmelCase = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) )
_UpperCAmelCase = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) )
_UpperCAmelCase = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) )
_UpperCAmelCase = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(snake_case , Namespace(foo=snake_case , baz=snake_case , opt=snake_case ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(snake_case , snake_case )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
_UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_UpperCAmelCase = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
_UpperCAmelCase = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_UpperCAmelCase = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
_UpperCAmelCase = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCamelCase_ ( self ) -> Optional[int]:
@dataclass
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = "toto"
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(snake_case , snake_case )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
_UpperCAmelCase = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
_UpperCAmelCase = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=snake_case )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=snake_case )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=snake_case )
self.argparsersEqual(snake_case , snake_case )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
snake_case , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
_UpperCAmelCase = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(snake_case , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , default=snake_case , type=snake_case )
expected.add_argument('--bar' , default=snake_case , type=snake_case , help='help message' )
expected.add_argument('--baz' , default=snake_case , type=snake_case )
expected.add_argument('--ces' , nargs='+' , default=[] , type=snake_case )
expected.add_argument('--des' , nargs='+' , default=[] , type=snake_case )
_UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(snake_case )
for dataclass_type in dataclass_types:
_UpperCAmelCase = HfArgumentParser(snake_case )
self.argparsersEqual(snake_case , snake_case )
_UpperCAmelCase = parser.parse_args([] )
self.assertEqual(snake_case , Namespace(foo=snake_case , bar=snake_case , baz=snake_case , ces=[] , des=[] ) )
_UpperCAmelCase = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(snake_case , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=snake_case , required=snake_case )
expected.add_argument('--required_str' , type=snake_case , required=snake_case )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=snake_case , )
self.argparsersEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , type=snake_case , required=snake_case )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=snake_case , )
expected.add_argument('--opt' , type=snake_case , default=snake_case )
expected.add_argument('--baz' , default='toto' , type=snake_case , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=snake_case )
self.argparsersEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
_UpperCAmelCase = parser.parse_dict(snake_case )[0]
_UpperCAmelCase = BasicExample(**snake_case )
self.assertEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(snake_case , parser.parse_dict , snake_case , allow_extra_keys=snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(snake_case , 'temp_json' )
os.mkdir(snake_case )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(snake_case , snake_case )
        _UpperCAmelCase = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
_UpperCAmelCase = BasicExample(**snake_case )
self.assertEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = HfArgumentParser(snake_case )
_UpperCAmelCase = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(snake_case , 'temp_yaml' )
os.mkdir(snake_case )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(snake_case , snake_case )
_UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
_UpperCAmelCase = BasicExample(**snake_case )
self.assertEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = HfArgumentParser(snake_case )
self.assertIsNotNone(snake_case )
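# Illustrative sketch of the pattern the tests above verify: HfArgumentParser builds an
# argparse CLI directly from a dataclass. The dataclass and its fields here are
# hypothetical examples, not the fixtures used in the tests.
from dataclasses import dataclass

from transformers import HfArgumentParser

@dataclass
class ExampleArguments:
    foo: int = 12          # hypothetical field
    bar: float = 3.14      # hypothetical field
    flag: bool = False     # bool fields become `--flag` style options

example_parser = HfArgumentParser(ExampleArguments)
(example_args,) = example_parser.parse_args_into_dataclasses(["--foo", "42", "--flag"])
assert example_args.foo == 42 and example_args.flag is True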
| 24
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roberta_fast'] = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta'] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta'] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta'] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
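# A stripped-down, illustrative sketch of the lazy-import idea used above (this is an
# assumption-laden toy, not the transformers `_LazyModule` implementation): names are
# registered per submodule, and the heavy submodule is only imported on first access.
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the heavy submodule only on first access, then cache the symbol
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value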
| 24
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """simple docstring"""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x):
        """simple docstring"""
        return math.sin(10 * x)
    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
    while i <= 100000:
        print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
        i *= 10
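# Quick sanity check of the approximation above: for a straight line every chord lies
# on the curve, so the piecewise-linear estimate is exact regardless of step count.
# f(x) = x on [0, 1] has arc length sqrt(2).
assert math.isclose(line_length(lambda x: x, 0, 1, 5), math.sqrt(2))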
| 448
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__UpperCamelCase : Optional[int] = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
__UpperCamelCase : Tuple = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
__UpperCamelCase : Optional[int] = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels) -> float:
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1(preds, labels) -> dict:
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels) -> dict:
    """simple docstring"""
    pearson_corr = float(pearsonr(preds, labels)[0] )
    spearman_corr = float(spearmanr(preds, labels)[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        '''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 448
| 1
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
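# A toy demonstration of `find_executable_batch_size` (already imported above) in
# isolation. It retries the wrapped function with a halved batch size whenever the
# function raises an out-of-memory style error; the failure threshold below is
# invented purely for illustration.
@find_executable_batch_size(starting_batch_size=64)
def toy_train(batch_size):
    if batch_size > 16:  # pretend anything larger exhausts GPU memory
        raise RuntimeError("CUDA out of memory.")
    print(f"succeeded with batch_size={batch_size}")

toy_train()  # retries 64 -> 32 -> 16, then prints the success message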
| 705
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
__lowerCAmelCase : List[Any] = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    backbone_config = BitConfig(
        global_padding='same', layer_type='bottleneck', depths=(3, 4, 9), out_features=['stage3'], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image, return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print('Predicted class:', logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"Pushing model and processor to the hub {vit_name}" )
        model.push_to_hub(F"ybelkada/{vit_name}" )
        processor.push_to_hub(F"ybelkada/{vit_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
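# Illustrative sketch of why `read_in_q_k_v` exists: timm fuses query/key/value into a
# single qkv matrix, while the HF model stores them separately. The slicing reduces to
# the following (the hidden size here is made up for illustration; torch is imported above).
hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused timm layout
query = qkv_weight[:hidden_size, :]
key = qkv_weight[hidden_size : 2 * hidden_size, :]
value = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)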
| 549
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    '''simple docstring'''
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        input_ids = tf.concat([input_ids, eos_tensor], axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """simple docstring"""
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1 )
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self, config_class=PegasusConfig )
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''
@cached_property
    def tokenizer(self):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
    def model(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        """simple docstring"""
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words
    def translate_src_text(self, **tokenizer_kwargs):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True )
        return generated_words
@slow
    def test_batch_generation(self):
        """simple docstring"""
        self._assert_generated_batch_equal_expected()
| 265
|
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        """simple docstring"""
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        """simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    '''simple docstring'''
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
    image = Image.open(dataset[0]['file'] )
    map = Image.open(dataset[1]['file'] )
    return image, map
def prepare_semantic_batch_inputs():
    '''simple docstring'''
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test' )
    image1 = Image.open(ds[0]['file'] )
    map1 = Image.open(ds[1]['file'] )
    image2 = Image.open(ds[2]['file'] )
    map2 = Image.open(ds[3]['file'] )
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = BeitImageProcessingTester(self )
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, 'do_resize' ) )
        self.assertTrue(hasattr(image_processing, 'size' ) )
        self.assertTrue(hasattr(image_processing, 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing, 'center_crop' ) )
        self.assertTrue(hasattr(image_processing, 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing, 'image_mean' ) )
        self.assertTrue(hasattr(image_processing, 'image_std' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20} )
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18} )
        self.assertEqual(image_processor.do_reduce_labels, False )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True )
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} )
        self.assertEqual(image_processor.do_reduce_labels, True )
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE : Tuple = []
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , maps[0] , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
SCREAMING_SNAKE_CASE : List[str] = image_processing(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
    def test_reduce_labels(self) -> None:
        """With label reduction enabled, the background class is remapped to the ignore index."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)
        # with do_reduce_labels (attribute name assumed from BEiT-style processors),
        # background 0 becomes the ignore index 255, so the upper bound grows to 255
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
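

# A minimal sketch (separate from the tests) of why the bound above flips from 150
# to 255 once label reduction is enabled: the background class 0 moves to the
# ignore index 255 and the remaining classes shift down by one. The attribute name
# `do_reduce_labels` used in the test is an assumption; the remapping below mirrors
# the documented behavior.
import torch

def reduce_labels(segmentation_map: torch.Tensor) -> torch.Tensor:
    labels = segmentation_map.clone()
    labels[labels == 0] = 255    # background becomes the ignore index
    labels = labels - 1          # shift the real classes down by one
    labels[labels == 254] = 255  # keep the ignore index stable after the shift
    return labels

if __name__ == "__main__":
    print(reduce_labels(torch.tensor([[0, 1, 150]])))  # tensor([[255,   0, 149]])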
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path) -> None:
    """Converts a TensorFlow LXMERT checkpoint into a PyTorch state dict."""
    config = LxmertConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
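
# Example invocation (paths are placeholders, not real checkpoints):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/lxmert/model.ckpt \
#       --config_file /path/to/lxmert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin
#
# The script file name is assumed from the transformers naming convention; the
# three flags are exactly those declared by the argument parser above.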
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    Checks, by bottom-up dynamic programming, whether some subset of ``arr``
    sums to ``required_sum``.
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
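
# A small worked example (illustrative, not part of the module): with
# arr = [3, 34, 4, 12, 5, 2], a sum of 9 is reachable (4 + 5), while 30 is not —
# the largest sum without 34 is 26, and 34 alone already overshoots.
if __name__ == "__main__":
    example = [3, 34, 4, 12, 5, 2]
    print(is_sum_subset(example, 9))   # True
    print(is_sum_subset(example, 30))  # False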
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig(PretrainedConfig):
    model_type = '''git_vision_model'''
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Loads the vision sub-config when the checkpoint ships a composite GIT config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type') == "git":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = '''git'''
    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
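
# A minimal usage sketch (illustrative, not part of the module): nested vision
# settings travel as a plain dict, get wrapped in GitVisionConfig, and are
# re-serialized by to_dict().
if __name__ == "__main__":
    config = GitConfig(vision_config={"image_size": 384, "patch_size": 32})
    serialized = config.to_dict()
    print(serialized["model_type"], serialized["vision_config"]["patch_size"])  # git 32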
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """depth_multiplier""" ) )
class MobileNetVaModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, min_depth=8, tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6", classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = config_and_inputs
SCREAMING_SNAKE_CASE__ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__UpperCamelCase : Dict = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Tuple = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Tuple = False
    def setUp(self) -> None:
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple = 26
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : str = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
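
# Condensed sketch of the integration flow above (downloads the checkpoint; not
# run as part of the unit tests):
#
#   processor = MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits   # shape (1, 1001): ImageNet classes + background
#   predicted_class = logits.argmax(-1).item()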
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_A = logging.get_logger(__name__)
_A = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_A = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
_A = {
"allenai/led-base-16384": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> dict:
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that BPE code chokes on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
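
# Illustration of the mapping just built: printable bytes map to themselves
# (bytes_to_unicode()[65] == "A"), while byte 0 — the first byte missing from the
# printable ranges — is pushed into the private range, i.e. chr(256) == "Ā".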
def get_pairs(word) -> set:
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCamelCase ( A_ ):
UpperCAmelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[str] = ["input_ids", "attention_mask"]
def __init__(self : List[Any] , _A : Any , _A : int , _A : Tuple="replace" , _A : str="<s>" , _A : Dict="</s>" , _A : Dict="</s>" , _A : List[str]="<s>" , _A : Union[str, Any]="<unk>" , _A : Optional[int]="<pad>" , _A : Dict="<mask>" , _A : Any=False , **_A : Optional[Any] , ) -> Tuple:
snake_case = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
snake_case = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
snake_case = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
snake_case = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
snake_case = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
snake_case = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
with open(_A , encoding="utf-8" ) as vocab_handle:
snake_case = json.load(_A )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = errors # how to handle errors in decoding
snake_case = bytes_to_unicode()
snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(_A , encoding="utf-8" ) as merges_handle:
snake_case = merges_handle.read().split("\n" )[1:-1]
snake_case = [tuple(merge.split() ) for merge in bpe_merges]
snake_case = dict(zip(_A , range(len(_A ) ) ) )
snake_case = {}
snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCAmelCase(self : int ) -> Tuple:
return len(self.encoder )
def UpperCAmelCase(self : List[str] ) -> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase(self : int , _A : int ) -> List[str]:
if token in self.cache:
return self.cache[token]
snake_case = tuple(_A )
snake_case = get_pairs(_A )
if not pairs:
return token
while True:
snake_case = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case , snake_case = bigram
snake_case = []
snake_case = 0
while i < len(_A ):
try:
snake_case = word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case = tuple(_A )
snake_case = new_word
if len(_A ) == 1:
break
else:
snake_case = get_pairs(_A )
snake_case = " ".join(_A )
snake_case = word
return word
def UpperCAmelCase(self : List[str] , _A : str ) -> Union[str, Any]:
snake_case = []
for token in re.findall(self.pat , _A ):
snake_case = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def UpperCAmelCase(self : Optional[int] , _A : Tuple ) -> Union[str, Any]:
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def UpperCAmelCase(self : List[str] , _A : Dict ) -> Union[str, Any]:
return self.decoder.get(_A )
def UpperCAmelCase(self : List[Any] , _A : Any ) -> int:
snake_case = "".join(_A )
snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def UpperCAmelCase(self : Dict , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(_A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case = os.path.join(
_A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + "\n" )
snake_case = 0
with open(_A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
snake_case = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase(self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case = [self.cls_token_id]
snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase(self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def UpperCAmelCase(self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase(self : Optional[Any] , _A : int , _A : List[Any]=False , **_A : Union[str, Any] ) -> Any:
snake_case = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
snake_case = " " + text
return (text, kwargs)
def UpperCAmelCase(self : Tuple , _A : Union[Dict[str, EncodedInput], BatchEncoding] , _A : Optional[int] = None , _A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _A : Optional[int] = None , _A : Optional[bool] = None , ) -> dict:
snake_case = super()._pad(
encoded_inputs=_A , max_length=_A , padding_strategy=_A , pad_to_multiple_of=_A , return_attention_mask=_A , )
# Load from model defaults
if return_attention_mask is None:
snake_case = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case = len(encoded_inputs["global_attention_mask"] ) != len(_A )
if needs_to_be_padded:
snake_case = len(_A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
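

# A minimal, plain-Python illustration (separate from the tokenizer) of the padding
# rule implemented above: `global_attention_mask` is padded with -1, because 0
# already means "local attention" rather than "do not attend".
def pad_global_attention_mask(mask: list, target_len: int, side: str = "right") -> list:
    difference = target_len - len(mask)
    if side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

if __name__ == "__main__":
    print(pad_global_attention_mask([1, 0, 0], 5))           # [1, 0, 0, -1, -1]
    print(pad_global_attention_mask([1, 0, 0], 5, "left"))   # [-1, -1, 1, 0, 0]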
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file, eval_file, test_file, tokenizer, label_column_id, max_seq_length=None):
    """Builds train/validation/test tf.data datasets from CSV files."""
snake_case = {}
if train_file is not None:
snake_case = [train_file]
if eval_file is not None:
snake_case = [eval_file]
if test_file is not None:
snake_case = [test_file]
snake_case = datasets.load_dataset("csv" , data_files=A__ )
snake_case = list(ds[list(files.keys() )[0]].features.keys() )
snake_case = features_name.pop(A__ )
snake_case = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case = {label: i for i, label in enumerate(A__ )}
snake_case = tokenizer.model_input_names
snake_case = {}
if len(A__ ) == 1:
for k in files.keys():
snake_case = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding="max_length" ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
snake_case = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding="max_length" , ) , batched=A__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
snake_case = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_A = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : int = field(metadata={"help": "Which column contains the label"} )
UpperCAmelCase__ : str = field(default=A_ , metadata={"help": "The path of the training file"} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "The path of the development file"} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "The path of the test file"} )
UpperCAmelCase__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main() -> Dict:
    """Parses arguments, builds the datasets, then trains and evaluates the model."""
snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case , snake_case , snake_case = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case , snake_case , snake_case , snake_case = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__ ) -> Dict:
snake_case = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case = trainer.evaluate()
snake_case = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(A__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(A__ )
return results
if __name__ == "__main__":
main()
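
# Typical invocation (script name and file paths are placeholders):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --label_column_id 0 \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --max_seq_length 128 \
#       --output_dir ./out --do_train --do_eval
#
# HfArgumentParser turns every dataclass field above into a --flag of the same name.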
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = parent
UpperCamelCase__ :int = 13
UpperCamelCase__ :Optional[int] = 7
UpperCamelCase__ :Dict = True
UpperCamelCase__ :Dict = True
UpperCamelCase__ :str = True
UpperCamelCase__ :List[Any] = True
UpperCamelCase__ :Any = True
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Optional[int] = False
UpperCamelCase__ :Tuple = False
UpperCamelCase__ :Optional[int] = 2
UpperCamelCase__ :List[str] = 99
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Any = 32
UpperCamelCase__ :List[str] = 2
UpperCamelCase__ :int = 4
UpperCamelCase__ :List[str] = 0.1
UpperCamelCase__ :Union[str, Any] = 0.1
UpperCamelCase__ :Union[str, Any] = 5_12
UpperCamelCase__ :List[str] = 16
UpperCamelCase__ :str = 2
UpperCamelCase__ :Optional[int] = 0.02
UpperCamelCase__ :Optional[int] = 3
UpperCamelCase__ :Optional[int] = 4
UpperCamelCase__ :Optional[int] = """last"""
UpperCamelCase__ :Tuple = True
UpperCamelCase__ :int = None
UpperCamelCase__ :Dict = 0
def __a ( self :int ):
UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
UpperCamelCase__ :Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase__ :Union[str, Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ :List[str] = None
if self.use_token_type_ids:
UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ :int = None
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :List[str] = None
if self.use_labels:
UpperCamelCase__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ :str = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
UpperCamelCase__ :int = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ :List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , ):
UpperCamelCase__ :int = TFFlaubertModel(config=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Optional[Any] = model(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = [input_ids, input_mask]
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , ):
UpperCamelCase__ :List[str] = TFFlaubertWithLMHeadModel(lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCamelCase__ :Any = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self :Dict , lowerCamelCase__ :List[str] , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :int , lowerCamelCase__ :Tuple , ):
UpperCamelCase__ :int = TFFlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
UpperCamelCase__ :int = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , ):
UpperCamelCase__ :List[Any] = TFFlaubertForSequenceClassification(lowerCamelCase__ )
UpperCamelCase__ :List[str] = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Any , ):
UpperCamelCase__ :Any = self.num_labels
UpperCamelCase__ :Tuple = TFFlaubertForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Any = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ :List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self :Tuple , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[str] , ):
UpperCamelCase__ :Optional[int] = self.num_choices
UpperCamelCase__ :Dict = TFFlaubertForMultipleChoice(config=lowerCamelCase__ )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :str = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :Any = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase__ :int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCamelCase__ :List[str] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self :Tuple ):
UpperCamelCase__ :str = self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :str = config_and_inputs
UpperCamelCase__ :Optional[Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case : List[Any] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case : Optional[int] = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Tuple = False
def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :int , lowerCamelCase__ :str , lowerCamelCase__ :List[Any] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def __a ( self :int ):
self.config_tester.run_common_tests()
def __a ( self :List[str] ):
UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def __a ( self :Tuple ):
UpperCamelCase__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def __a ( self :Union[str, Any] ):
UpperCamelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def __a ( self :Any ):
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCamelCase__ )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCamelCase__ )
@slow
def __a ( self :str ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFFlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of ``n`` in ascending order, by trial division.
    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
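
# Worked example (trial division): 360 = 2^3 * 3^2 * 5.
if __name__ == "__main__":
    print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]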
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
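
# How the lazy pattern above behaves at runtime (illustrative, simplified): the
# module object is swapped for a _LazyModule whose __getattr__ imports the real
# submodule on first attribute access, keeping the top-level import cheap. A
# generic analogue, with a hypothetical `_attr_to_module` lookup table:
#
#   import importlib
#   def __getattr__(name):
#       submodule = importlib.import_module("." + _attr_to_module[name], __package__)
#       return getattr(submodule, name)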
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: c = sqrt(K / rho) for bulk modulus K (Pa) and
    density rho (kg/m^3).
    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 2)
    1467.76
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic function 1 / (1 + e^-x), applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
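
# Quick numerical sanity check (illustrative): the 1.702-sigmoid curve tracks the
# exact GELU closely near the origin.
if __name__ == "__main__":
    x = np.array([-1.0, 0.0, 1.0])
    print(gaussian_error_linear_unit(x))  # approx [-0.154  0.     0.846]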
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """
    A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times.
    """
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """
    The same check without Counter, counting character frequencies by hand.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    """Times both implementations on ``check_str`` via ``timeit``."""
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        'Enter string to determine if it can be rearranged as a palindrome or not: '
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Row-normalizes both embedding matrices, then returns their cosine similarity."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
def _lowercase (self : str ):
UpperCAmelCase_ = FlaxCLIPVisionModule(self.config.vision_config )
UpperCAmelCase_ = nn.Dense(self.config.projection_dim , use_bias=__a , dtype=self.dtype )
UpperCAmelCase_ = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
UpperCAmelCase_ = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCAmelCase_ = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
UpperCAmelCase_ = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__(self : int , __a : Union[str, Any] ):
UpperCAmelCase_ = self.vision_model(__a )[1]
UpperCAmelCase_ = self.visual_projection(__a )
UpperCAmelCase_ = jax_cosine_distance(__a , self.special_care_embeds )
UpperCAmelCase_ = jax_cosine_distance(__a , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCAmelCase_ = jnp.round(__a , 3 )
UpperCAmelCase_ = jnp.any(special_scores > 0 , axis=1 , keepdims=__a )
# Use a lower threshold if an image has any special care concept
UpperCAmelCase_ = is_special_care * 0.01
UpperCAmelCase_ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCAmelCase_ = jnp.round(__a , 3 )
UpperCAmelCase_ = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__(self : Optional[int] , __a : CLIPConfig , __a : Optional[Tuple] = None , __a : int = 0 , __a : jnp.dtype = jnp.floataa , __a : bool = True , **__a : Optional[int] , ):
if input_shape is None:
UpperCAmelCase_ = (1, 224, 224, 3)
UpperCAmelCase_ = self.module_class(config=__a , dtype=__a , **__a )
super().__init__(__a , __a , input_shape=__a , seed=__a , dtype=__a , _do_init=_do_init )
def _lowercase (self : Tuple , __a : jax.random.KeyArray , __a : Tuple , __a : FrozenDict = None ):
# init input tensor
UpperCAmelCase_ = jax.random.normal(__a , __a )
UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(__a )
UpperCAmelCase_ = {"params": params_rng, "dropout": dropout_rng}
UpperCAmelCase_ = self.module.init(__a , __a )["params"]
return random_params
def __call__(self : Optional[int] , __a : Any , __a : dict = None , ):
UpperCAmelCase_ = jnp.transpose(__a , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(__a , dtype=jnp.floataa ) , rngs={} , )
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("""could not find root in given interval.""")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
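
# On [1, 1000], the printed root of f(x) = x^3 - 2x - 5 converges to ~2.0945515.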
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack; `f` is a global dp table."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
import base64


def base64_encode(string: str) -> bytes:
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_data: bytes) -> str:
    return base64.b64decode(encoded_data).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
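
# A quick round-trip check (added for illustration): the Base64 encoding of
# "Hello World!" is a fixed, well-known string, so both directions can be
# asserted directly.
if __name__ == "__main__":
    assert base64_encode("Hello World!") == b"SGVsbG8gV29ybGQh"
    assert base64_decode(b"SGVsbG8gV29ybGQh") == "Hello World!"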
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
from itertools import count


def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
import math


def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes; returns all primes below `n`."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
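
# A quick sanity check (added for illustration): the sieve above only scans odd
# candidates, so it should still reproduce the full list of primes below 30.
if __name__ == "__main__":
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]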
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
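
# A minimal usage sketch (added for illustration; assumes the `transformers`
# package is installed, since this module uses relative imports). Each
# per-stage hyperparameter is a 3-element list, one entry per CvT stage:
#
#     from transformers import CvtConfig, CvtModel
#
#     config = CvtConfig(embed_dim=[64, 192, 384], depth=[1, 2, 10])
#     model = CvtModel(config)  # randomly initialized; widths follow embed_dim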
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
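
# An illustrative example (added; traced by hand from the code above, not
# captured from a real run) of how `TranslationVariableLanguages` splits
# multi-valued entries and sorts by language code:
#
#     feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     # -> {"language": ("en", "fr", "fr"),
#     #     "translation": ("the cat", "la chatte", "le chat")}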
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
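
# A minimal usage sketch (added for illustration; assumes the `transformers`
# package is installed): `rope_scaling` must carry a known `type` and a float
# `factor` > 1.0, otherwise `_rope_scaling_validation` raises a ValueError.
#
#     from transformers import LlamaConfig
#
#     LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#     LlamaConfig(rope_scaling={"type": "ntk", "factor": 2.0})     # ValueError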
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
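
# A small worked example (added for clarity; the sentence is hypothetical):
# given BERT tokens ["我", "爱", "北", "京"] and the LTP word set {"北京"},
# add_sub_symbol marks the continuation piece of the whole word "北京",
# returning ["我", "爱", "北", "##京"], which is exactly the grouping that
# whole-word masking needs.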
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
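
# A minimal usage sketch (added for illustration; the checkpoint id and the
# availability of Flax weights for it are assumptions, not verified here):
#
#     import jax.numpy as jnp
#
#     checker, params = FlaxStableDiffusionSafetyChecker.from_pretrained(
#         "CompVis/stable-diffusion-safety-checker", _do_init=False
#     )
#     images = jnp.zeros((2, 3, 224, 224))  # NCHW pixel values, as __call__ expects
#     has_nsfw = checker(images, params=params)  # one boolean per image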
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
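
# A small sanity check (added for illustration): the 1.702-sigmoid form above
# is the "quick GELU" approximation, which should track the tanh-based GELU
# approximation to within a few hundredths on moderate inputs.
if __name__ == "__main__":
    x = np.linspace(-3, 3, 7)
    tanh_gelu = 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x**3)))
    assert np.allclose(gaussian_error_linear_unit(x), tanh_gelu, atol=0.05)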
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _UpperCAmelCase , unittest.TestCase):
"""simple docstring"""
_A : Tuple = CLIPTokenizer
_A : Any = CLIPTokenizerFast
_A : List[str] = True
_A : List[str] = {}
_A : Any = False
def __UpperCamelCase (self ):
super().setUp()
# fmt: off
snake_case_ : List[Any] = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case_ : Tuple = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case_ : Any = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
snake_case_ : Dict = {"""unk_token""": """<unk>"""}
snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase__ ) )
def __UpperCamelCase (self , **lowercase__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , **lowercase__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : str = """lower newer"""
snake_case_ : List[Any] = """lower newer"""
return input_text, output_text
def __UpperCamelCase (self ):
snake_case_ : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Union[str, Any] = """lower newer"""
snake_case_ : str = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
snake_case_ : Tuple = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
snake_case_ : Optional[int] = tokens + [tokenizer.unk_token]
snake_case_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith("The `backend_tokenizer` provided does not match the expected format.")
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
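
# Illustrative walk-through of the toy vocabulary above (not part of the test
# suite): under the merges ["l o", "lo w</w>", "e r</w>"], the word "lower"
# starts as the symbols l o w e r</w>; "l o" merges to "lo" and "e r</w>" to
# "er</w>", leaving ["lo", "w", "er</w>"] -- exactly what test_full_tokenizer asserts.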
| 480
|
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
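
# Usage sketch (file paths are hypothetical): reading a plain-text file into a
# map-style or streaming Dataset, one example per line.
#   ds = TextDatasetReader("corpus.txt", cache_dir="cache").read()
#   ds_stream = TextDatasetReader("corpus.txt", streaming=True).read()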
| 480
| 1
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases): the fused qkv matrix
        # of shape (3 * hidden_size, hidden_size) is split into three equal slices
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=False)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
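
# Example invocation (script and output path names are illustrative):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten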
| 701
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_up_sql_file):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_up_sql_file):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
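
# Outside of pytest, the same round trip looks like this (paths hypothetical):
#   ds = SqlDatasetReader("dataset", "sqlite:///in.db").read()
#   SqlDatasetWriter(ds, "dataset", "sqlite:///out.db", num_proc=1).write()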
| 590
| 0
|
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size, sequence_length, vocab_size):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 529
|
def solution(limit=28123):
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
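

# Sanity check (illustrative, not in the original module): 24 = 12 + 12 is the
# smallest sum of two abundant numbers, so every positive integer below 24 must
# be counted by `solution`.
assert solution(23) == sum(range(1, 24))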
if __name__ == "__main__":
print(solution())
| 529
| 1
|
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Return the square root of `a` using Newton's method on f(x) = x**2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
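

# Quick check (illustrative): Newton's iteration x_{k+1} = x_k - f(x_k)/f'(x_k)
# on f(x) = x**2 - a converges quadratically, so it matches math.sqrt closely.
assert abs(square_root_iterative(2.0) - math.sqrt(2.0)) < 1e-9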
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
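
# Example invocation (script and path names are illustrative):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path ./pix2struct_base/checkpoint_0 \
#       --pytorch_dump_folder_path ./pix2struct-base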
| 682
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
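

# Minimal usage example (tree is illustrative): a root holding 3 coins with two
# empty children needs exactly two moves, one coin to each child.
assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2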
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413
|
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 413
| 1
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 56
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
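
# Usage sketch (checkpoint name taken from the pretrained map above):
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tok("Hello world")["input_ids"]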
| 56
| 1
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
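
# Usage sketch (worker count and checkpoint are illustrative): the retrieval
# workers are Ray actor handles wrapping `RayRetriever`.
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", workers, index_name="exact", use_dummy_dataset=True
#   )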
| 264
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    # "encodec_24khz" and "encodec_32khz" share the 24 kHz key mapping
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
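# Example invocation (illustrative; the script filename and the local paths
# below are hypothetical placeholders):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted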
| 484
| 0
|
'''simple docstring'''
def perfect_cube(n: int ) -> bool:
    """simple docstring"""
    val = round(n ** (1 / 3) )
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
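# An exact integer variant (a sketch, not part of the original snippet):
# binary-search the cube root so no floating-point rounding is involved.
def perfect_cube_exact(n: int) -> bool:
    lo, hi = 0, max(1, abs(n))
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == abs(n):
            return True
        if cube < abs(n):
            lo = mid + 1
        else:
            hi = mid - 1
    return False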
| 719
|
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowerCamelCase = """bert-base-cased"""
_lowerCamelCase = """google/pegasus-xsum"""
_lowerCamelCase = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
_lowerCamelCase = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
_lowerCamelCase = """patrickvonplaten/t5-tiny-random"""
_lowerCamelCase = """sshleifer/bart-tiny-random"""
_lowerCamelCase = """sshleifer/tiny-mbart"""
_lowerCamelCase = """sshleifer/tiny-marian-en-de"""
def _dump_articles(path: Path , articles: list ):
    """simple docstring"""
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
def make_test_data_dir(tmp_dir: str ) -> str:
    """simple docstring"""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class _snake_case (__SCREAMING_SNAKE_CASE):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
@slow
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_snake_case )
UpperCAmelCase_ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : Optional[int] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES )
UpperCAmelCase_ : List[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES )
UpperCAmelCase_ : Optional[int] = 4
UpperCAmelCase_ : Union[str, Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCAmelCase_ , UpperCAmelCase_ : int = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
UpperCAmelCase_ : Tuple = SeqaSeqDataset(
_snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=_snake_case ,max_target_length=_snake_case ,src_lang=_snake_case ,tgt_lang=_snake_case ,)
UpperCAmelCase_ : List[Any] = DataLoader(_snake_case ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_snake_case ,_snake_case )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase_ : Union[str, Any] = shift_tokens_right(batch["labels"] ,tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_snake_case )
UpperCAmelCase_ : List[str] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
UpperCAmelCase_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in ARTICLES )
UpperCAmelCase_ : Optional[Any] = max(len(tokenizer.encode(_snake_case ) ) for a in SUMMARIES )
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Dict = LegacySeqaSeqDataset(
_snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=20 ,max_target_length=_snake_case ,)
UpperCAmelCase_ : Optional[int] = DataLoader(_snake_case ,batch_size=2 ,collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
UpperCAmelCase_ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
UpperCAmelCase_ : Optional[int] = tmp_dir.joinpath("train.source" ).open().readlines()
UpperCAmelCase_ : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_snake_case ,_snake_case ,1_28 ,_snake_case )
UpperCAmelCase_ : Dict = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase_ : str = {x.name for x in save_dir.iterdir()}
UpperCAmelCase_ : List[str] = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_snake_case ) < len(_snake_case )
assert len(_snake_case ) == 1
assert len(packed_examples[0] ) == sum(len(_snake_case ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason="This test requires fairseq" )
def UpperCamelCase__ ( self ):
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self._get_dataset(max_len=64 )
UpperCAmelCase_ : int = 64
UpperCAmelCase_ : str = ds.make_dynamic_sampler(_snake_case ,required_batch_size_multiple=_snake_case )
UpperCAmelCase_ : Dict = [len(_snake_case ) for x in batch_sampler]
assert len(set(_snake_case ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_snake_case ) == len(_snake_case ) # no dropped or added examples
UpperCAmelCase_ : Any = DataLoader(_snake_case ,batch_sampler=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 )
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[int] = []
for batch in data_loader:
UpperCAmelCase_ : Any = batch["input_ids"].shape
UpperCAmelCase_ : Optional[Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase_ : int = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_snake_case )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_snake_case )
assert num_src_per_batch[0] == max(_snake_case )
if failures:
raise AssertionError(f'''too many tokens in {len(_snake_case )} batches''' )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self._get_dataset(max_len=5_12 )
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Optional[int] = ds.make_sortish_sampler(_snake_case ,shuffle=_snake_case )
UpperCAmelCase_ : Optional[int] = DataLoader(_snake_case ,batch_size=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 )
UpperCAmelCase_ : Union[str, Any] = DataLoader(_snake_case ,batch_size=_snake_case ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.pad_token_id
def count_pad_tokens(_snake_case ,_snake_case="input_ids" ):
return [batch[k].eq(_snake_case ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_snake_case ,k="labels" ) ) < sum(count_pad_tokens(_snake_case ,k="labels" ) )
assert sum(count_pad_tokens(_snake_case ) ) < sum(count_pad_tokens(_snake_case ) )
assert len(_snake_case ) == len(_snake_case )
def UpperCamelCase__ ( self ,_snake_case=10_00 ,_snake_case=1_28 ):
if os.getenv("USE_REAL_DATA" ,_snake_case ):
UpperCAmelCase_ : List[Any] = "examples/seq2seq/wmt_en_ro"
UpperCAmelCase_ : Dict = max_len * 2 * 64
if not Path(_snake_case ).joinpath("train.len" ).exists():
save_len_file(_snake_case ,_snake_case )
else:
UpperCAmelCase_ : Optional[Any] = "examples/seq2seq/test_data/wmt_en_ro"
UpperCAmelCase_ : Union[str, Any] = max_len * 4
save_len_file(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(_snake_case )
UpperCAmelCase_ : Union[str, Any] = SeqaSeqDataset(
_snake_case ,data_dir=_snake_case ,type_path="train" ,max_source_length=_snake_case ,max_target_length=_snake_case ,n_obs=_snake_case ,)
return ds, max_tokens, tokenizer
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._get_dataset()
UpperCAmelCase_ : Tuple = set(DistributedSortishSampler(_snake_case ,2_56 ,num_replicas=2 ,rank=0 ,add_extra_examples=_snake_case ) )
UpperCAmelCase_ : Union[str, Any] = set(DistributedSortishSampler(_snake_case ,2_56 ,num_replicas=2 ,rank=1 ,add_extra_examples=_snake_case ) )
assert idsa.intersection(_snake_case ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] ,)
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_snake_case ,use_fast=_snake_case )
if tok_name == MBART_TINY:
UpperCAmelCase_ : int = SeqaSeqDataset(
_snake_case ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,src_lang="EN" ,tgt_lang="FR" ,)
UpperCAmelCase_ : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase_ : int = SeqaSeqDataset(
_snake_case ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path="train" ,max_source_length=4 ,max_target_length=8 ,)
UpperCAmelCase_ : Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_snake_case ) == 1 if tok_name == BART_TINY else len(_snake_case ) == 0
| 323
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_UpperCamelCase = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
_UpperCamelCase = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
_UpperCamelCase = "▁"
# Segments (not really needed)
_UpperCamelCase = 0
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = 3
_UpperCamelCase = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
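    # Worked example (illustrative): XLNet appends its special tokens at the END
    # of the sequence, so a pair (A, B) is encoded as  A <sep> B <sep> <cls>  and
    # the token type ids are  [0]*(len(A)+1) + [1]*(len(B)+1) + [2], where
    # segment id 2 marks the <cls> token.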
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 363
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=3 , snake_case_=32 , snake_case_=3 , snake_case_=10 , snake_case_=[10, 20, 30, 40] , snake_case_=[1, 1, 2, 1] , snake_case_=True , snake_case_=True , snake_case_="relu" , snake_case_=3 , snake_case_=None , ):
'''simple docstring'''
A__ : Tuple = parent
A__ : int = batch_size
A__ : List[Any] = image_size
A__ : Any = num_channels
A__ : Tuple = embeddings_size
A__ : Union[str, Any] = hidden_sizes
A__ : Tuple = depths
A__ : Union[str, Any] = is_training
A__ : Any = use_labels
A__ : Optional[Any] = hidden_act
A__ : str = num_labels
A__ : Optional[Any] = scope
A__ : List[Any] = len(snake_case_ )
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ : str = None
if self.use_labels:
A__ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A__ : Dict = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : Union[str, Any] = TFRegNetModel(config=snake_case_ )
A__ : Optional[int] = model(snake_case_ , training=snake_case_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
A__ : Optional[int] = self.num_labels
A__ : Tuple = TFRegNetForImageClassification(snake_case_ )
A__ : Optional[int] = model(snake_case_ , labels=snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : List[str] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCAmelCase (__A , __A , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : str = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_UpperCamelCase : Tuple = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : int = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
_UpperCamelCase : Union[str, Any] = False
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Dict = TFRegNetModelTester(self )
A__ : str = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = model_class(snake_case_ )
A__ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Union[str, Any] = [*signature.parameters.keys()]
A__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
A__ : str = model_class(snake_case_ )
A__ : Union[str, Any] = model(**self._prepare_for_class(snake_case_ , snake_case_ ) , training=snake_case_ )
A__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A__ : Optional[Any] = layer_type
A__ : Dict = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase ( self ):
'''simple docstring'''
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(snake_case_ , snake_case_ , snake_case_ , snake_case_={} ):
A__ : List[str] = model(snake_case_ , return_dict=snake_case_ , **snake_case_ )
A__ : Union[str, Any] = model(snake_case_ , return_dict=snake_case_ , **snake_case_ ).to_tuple()
def recursive_check(snake_case_ , snake_case_ ):
if isinstance(snake_case_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(snake_case_ , snake_case_ ):
recursive_check(snake_case_ , snake_case_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(snake_case_ , snake_case_ ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(snake_case_ , snake_case_ )
for model_class in self.all_model_classes:
A__ : Any = model_class(snake_case_ )
A__ : Dict = self._prepare_for_class(snake_case_ , snake_case_ )
A__ : List[Any] = self._prepare_for_class(snake_case_ , snake_case_ )
check_equivalence(snake_case_ , snake_case_ , snake_case_ )
A__ : Tuple = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
A__ : List[str] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
check_equivalence(snake_case_ , snake_case_ , snake_case_ )
A__ : Optional[int] = self._prepare_for_class(snake_case_ , snake_case_ )
A__ : List[str] = self._prepare_for_class(snake_case_ , snake_case_ )
check_equivalence(snake_case_ , snake_case_ , snake_case_ , {"""output_hidden_states""": True} )
A__ : Dict = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
A__ : Union[str, Any] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
check_equivalence(snake_case_ , snake_case_ , snake_case_ , {"""output_hidden_states""": True} )
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Any = TFRegNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _A( ):
A__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCAmelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : int = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A__ : str = self.default_image_processor
A__ : Any = prepare_img()
A__ : Tuple = image_processor(images=snake_case_ , return_tensors="""tf""" )
# forward pass
A__ : List[Any] = model(**snake_case_ , training=snake_case_ )
# verify the logits
A__ : Tuple = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
A__ : Tuple = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , snake_case_ , atol=1E-4 )
| 363
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 705
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
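# Minimal usage sketch (illustrative, not part of the library): iterate the
# reverse-time VP-SDE by repeatedly calling step_pred with the model score.
# `score_model`, `x`, and `generator` below are hypothetical placeholders.
#
#   scheduler = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   scheduler.set_timesteps(1000)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t, generator=generator)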
| 112
| 0
|
from math import factorial, radians
def maclaurin_sin(angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ):
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 402
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
_a = StableDiffusionInstructPixaPixPipeline
_a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_a = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
_a = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ :Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCamelCase__ :List[str] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
UpperCamelCase__ :Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase__ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase__ :Tuple = CLIPTextModel(UpperCamelCase_ )
UpperCamelCase__ :str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ :Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ :List[Any] = Image.fromarray(np.uint8(image ) ).convert('''RGB''' )
if str(UpperCamelCase_ ).startswith('''mps''' ):
UpperCamelCase__ :Tuple = torch.manual_seed(UpperCamelCase_ )
else:
UpperCamelCase__ :List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :str = self.get_dummy_components()
UpperCamelCase__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Dict = sd_pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Optional[Any] = self.get_dummy_components()
UpperCamelCase__ :Any = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = '''french fries'''
UpperCamelCase__ :Any = sd_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ )
UpperCamelCase__ :List[Any] = output.images
UpperCamelCase__ :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Dict = self.get_dummy_components()
UpperCamelCase__ :str = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :Any = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Dict = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = [inputs['''prompt''']] * 2
        UpperCamelCase__ :str = np.array(inputs['''image'''] ).astype(np.float32 ) / 255.0
UpperCamelCase__ :int = torch.from_numpy(UpperCamelCase_ ).unsqueeze(0 ).to(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = image / 2 + 0.5
UpperCamelCase__ :Union[str, Any] = image.permute(0 , 3 , 1 , 2 )
UpperCamelCase__ :Union[str, Any] = image.repeat(2 , 1 , 1 , 1 )
UpperCamelCase__ :Optional[Any] = sd_pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
UpperCamelCase__ :List[Any] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ :Tuple = self.get_dummy_components()
UpperCamelCase__ :Union[str, Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
UpperCamelCase__ :Optional[int] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :List[Any] = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :str = self.get_dummy_inputs(UpperCamelCase_ )
UpperCamelCase__ :str = sd_pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Tuple = image[0, -3:, -3:, -1]
        slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
UpperCamelCase__ :Union[str, Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_dummy_components()
UpperCamelCase__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase_ )
UpperCamelCase__ :List[Any] = VaeImageProcessor(do_resize=UpperCamelCase_ , do_normalize=UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type='''pt''' ) )[0]
UpperCamelCase__ :List[Any] = components['''vae''']
UpperCamelCase__ :Optional[Any] = self.get_dummy_inputs_by_type(UpperCamelCase_ , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCamelCase__ :List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCamelCase__ :str = pipe(**UpperCamelCase_ )[0]
UpperCamelCase__ :Optional[int] = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase_ , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self , UpperCamelCase_=0 ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = torch.manual_seed(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
UpperCamelCase__ :Dict = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :Optional[int] = self.get_inputs()
UpperCamelCase__ :str = pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :str = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ )
UpperCamelCase__ :Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :int = self.get_inputs()
UpperCamelCase__ :List[str] = pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :Optional[int] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=UpperCamelCase_ )
UpperCamelCase__ :str = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :Union[str, Any] = self.get_inputs()
UpperCamelCase__ :Dict = pipe(**UpperCamelCase_ ).images
UpperCamelCase__ :Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ :Tuple = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = 0
def callback_fn(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None:
UpperCamelCase__ :Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCamelCase__ :Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ :Optional[Any] = latents[0, -3:, -3:, -1]
UpperCamelCase__ :List[str] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCamelCase__ :Union[str, Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
UpperCamelCase__ :Union[str, Any] = latents[0, -3:, -3:, -1]
UpperCamelCase__ :List[Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCamelCase__ :Union[str, Any] = False
UpperCamelCase__ :Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None , torch_dtype=torch.float16 )
UpperCamelCase__ :Dict = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :Dict = self.get_inputs()
pipe(**UpperCamelCase_ , callback=UpperCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase__ :Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None , torch_dtype=torch.float16 )
UpperCamelCase__ :List[str] = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase__ :Any = self.get_inputs()
UpperCamelCase__ :Tuple = pipe(**UpperCamelCase_ )
UpperCamelCase__ :str = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase__ :int = inputs['''image'''].resize((504, 504) )
UpperCamelCase__ :Union[str, Any] = '''timbrooks/instruct-pix2pix'''
UpperCamelCase__ :str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
UpperCamelCase__ :List[str] = pipe(**UpperCamelCase_ )
UpperCamelCase__ :str = output.images[0]
UpperCamelCase__ :str = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
UpperCamelCase__ :Dict = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 189
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : List[Any] = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig ):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=64 , patch_size=1 , num_channels=3 , embed_dim=180 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
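# Illustrative usage (a sketch): the defaults correspond to the classical-SR
# 2x setting, and `num_layers` is derived from `depths` at construction time.
#   config = Swin2SRConfig()
#   assert config.num_layers == len(config.depths) == 6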
| 645
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            pass
def hashimage(image: Image ):
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
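# e.g. hashimage(Image.new("RGB", (4, 4))) yields a deterministic 32-character
# MD5 hex digest, which makes generated images comparable across test runs.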
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _UpperCAmelCase ( self , a , a ) -> Optional[int]:
lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a )
import datasets
lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
lowercase__ : List[Any] = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] , a , )
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
@require_torch
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Tuple = 'Intel/dpt-large'
lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a )
lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
lowercase__ : Optional[Any] = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
# This is highly irregular to have no small tests.
self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
| 645
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a: Tuple = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__a: Optional[Any] = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = '''mask2former'''
    backbones_supported = ['''swin''']
    attribute_map = {'''hidden_size''': '''hidden_dim'''}
    def __init__( self , backbone_config: Optional[Dict] = None , feature_size: int = 256 , mask_feature_size: int = 256 , hidden_dim: int = 256 , encoder_feedforward_dim: int = 1024 , activation_function: str = "relu" , encoder_layers: int = 6 , decoder_layers: int = 10 , num_attention_heads: int = 8 , dropout: float = 0.0 , dim_feedforward: int = 2048 , pre_norm: bool = False , enforce_input_projection: bool = False , common_stride: int = 4 , ignore_value: int = 255 , num_queries: int = 100 , no_object_weight: float = 0.1 , class_weight: float = 2.0 , mask_weight: float = 5.0 , dice_weight: float = 5.0 , train_num_points: int = 12544 , oversample_ratio: float = 3.0 , importance_sample_ratio: float = 0.75 , init_std: float = 0.02 , init_xavier_std: float = 1.0 , use_auxiliary_loss: bool = True , feature_strides: List[int] = [4, 8, 16, 32] , output_auxiliary_logits: bool = None , **kwargs , ):
        """simple docstring"""
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
            backbone_config = CONFIG_MAPPING["""swin"""](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("""model_type""" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                f"""Supported model types: {",".join(self.backbones_supported )}""" )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls , backbone_config: PretrainedConfig , **kwargs ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
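# Illustrative usage (a sketch): wrap an existing backbone config and round-trip
# it through the serializable dict form.
#   backbone = CONFIG_MAPPING["swin"]()
#   config = Mask2FormerConfig.from_backbone_config(backbone)
#   assert config.to_dict()["backbone_config"]["model_type"] == "swin"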
| 108
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase_ = 1_6
lowerCamelCase_ = 3_2
def lowerCamelCase ( a_ , a_ = 16 ) -> Tuple:
lowerCAmelCase_ = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase_ = load_dataset('glue' , 'mrpc' )
def tokenize_function(a_ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a_ , max_length=a_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ = datasets.map(
a_ , batched=a_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want padded lengths to be round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ = 8
else:
lowerCAmelCase_ = None
return tokenizer.pad(
a_ , padding='longest' , max_length=a_ , pad_to_multiple_of=a_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase_ = DataLoader(
tokenized_datasets['train'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
lowerCAmelCase_ = DataLoader(
tokenized_datasets['validation'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase_ = mocked_dataloaders # noqa: F811
def lowerCamelCase ( a_ , a_ ) -> Dict:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , a_ ) == "1":
lowerCAmelCase_ = 2
# Initialize accelerator
lowerCAmelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config['lr']
lowerCAmelCase_ = int(config['num_epochs'] )
lowerCAmelCase_ = int(config['seed'] )
lowerCAmelCase_ = int(config['batch_size'] )
lowerCAmelCase_ = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=a_ )
def inner_training_loop(a_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(a_ )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ = AdamW(params=model.parameters() , lr=a_ )
lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(a_ , a_ )
# Instantiate scheduler
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=100 , num_training_steps=(len(a_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# Now we train the model
for epoch in range(a_ ):
model.train()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase_ = model(**a_ )
lowerCAmelCase_ = outputs.loss
accelerator.backward(a_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(a_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
lowerCAmelCase_ = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=a_ , references=a_ , )
lowerCAmelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , a_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowerCamelCase ( ) -> Tuple:
lowerCAmelCase_ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=a_ , default=a_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
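# A minimal sketch of what `find_executable_batch_size` does, kept as comments
# so nothing runs on import; the `demo` function below is hypothetical. The
# decorator calls the wrapped function with `starting_batch_size` and, whenever
# a CUDA out-of-memory error escapes, halves the batch size and retries:
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=64)
#   def demo(batch_size):
#       print(f"trying batch_size={batch_size}")
#
#   demo()  # called with no arguments; the decorator supplies batch_size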
| 318
| 0
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : list[list[int]] = []
create_all_state(1, __lowerCamelCase, __lowerCamelCase, [], __lowerCamelCase )
return result
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__lowerCamelCase, total_number - level + 2 ):
current_list.append(__lowerCamelCase )
create_all_state(i + 1, __lowerCamelCase, level - 1, __lowerCamelCase, __lowerCamelCase )
current_list.pop()
def lowerCamelCase__ (__lowerCamelCase ):
for i in total_list:
print(*__lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ =4
UpperCamelCase__ =2
UpperCamelCase__ =generate_all_combinations(n, k)
print_all_state(total_list)
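# Sanity check (kept as a comment since `total_list` only exists when run as a
# script): the backtracking above should reproduce itertools.combinations over
# range(1, n + 1), in the same lexicographic order.
#
#   from itertools import combinations
#   assert [list(c) for c in combinations(range(1, 5), 2)] == total_list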
| 719
|
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# Construct model
if gpta_config_file == "":
_SCREAMING_SNAKE_CASE : str = GPTaConfig()
else:
_SCREAMING_SNAKE_CASE : int = GPTaConfig.from_json_file(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = GPTaModel(__lowerCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Save pytorch-model
_SCREAMING_SNAKE_CASE : Optional[Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_SCREAMING_SNAKE_CASE : Tuple = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict(), __lowerCamelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__lowerCamelCase, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
UpperCamelCase__ =parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
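# Example invocation (the script name and paths are placeholders):
#
#   python convert_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# Leaving --gpt2_config_file at its empty-string default builds a default
# GPT-2 config, as handled at the top of the conversion function above.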
| 381
| 0
|
'''simple docstring'''
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = credit_card_number
lowerCamelCase_ : Any = 0
lowerCamelCase_ : str = len(_lowercase ) - 2
for i in range(_lowercase , -1 , -2 ):
# double the value of every second digit
lowerCamelCase_ : Tuple = int(cc_number[i] )
digit *= 2
# If doubling a digit results in a two-digit number,
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
lowerCamelCase_ : Optional[int] = cc_number[:i] + str(_lowercase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_lowercase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
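# Worked example for "4111111111111111" (a common Visa-style test number):
# every second digit from the right is doubled, so the leading 4 -> 8 and seven
# of the 1s -> 2 each, contributing 8 + 7 * 2 = 22; the remaining eight digits
# are all 1, contributing 8. Total = 30, and 30 % 10 == 0, so the check passes.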
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
lowerCamelCase_ : str = F"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(F"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(_lowercase ) <= 16:
print(F"""{error_message} of its length.""" )
return False
if not validate_initial_digits(_lowercase ):
print(F"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(_lowercase ):
print(F"""{error_message} it fails the Luhn check.""" )
return False
print(F"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 422
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]:
'''simple docstring'''
lowerCamelCase_ : str = [True] * limit
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : Union[str, Any] = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCamelCase_ : List[Any] = i * 2
while index < limit:
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : str = index + i
lowerCamelCase_ : str = [2]
for i in range(3 , _lowercase , 2 ):
if is_prime[i]:
primes.append(_lowercase )
return primes
def lowercase_ ( _lowercase = 1_000_000 ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = prime_sieve(_lowercase )
lowerCamelCase_ : int = 0
lowerCamelCase_ : Union[str, Any] = 0
for i in range(len(_lowercase ) ):
for j in range(i + length , len(_lowercase ) ):
lowerCamelCase_ : Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase_ : int = j - i
lowerCamelCase_ : Any = sol
return largest
if __name__ == "__main__":
print(f'{solution() = }')
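# Sanity check from the Project Euler 50 statement: below a ceiling of 100, the
# longest sum of consecutive primes that is itself prime is
# 2 + 3 + 5 + 7 + 11 + 13 = 41 (six terms), so solution(100) should return 41.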
| 422
| 1
|
import logging
from transformers import PretrainedConfig
A_ = logging.getLogger(__name__)
A_ = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class __lowercase ( _A ):
lowercase = 'bertabs'
def __init__( self : Dict , __lowerCamelCase : Tuple=3_05_22 , __lowerCamelCase : Tuple=5_12 , __lowerCamelCase : List[Any]=6 , __lowerCamelCase : Any=5_12 , __lowerCamelCase : Any=8 , __lowerCamelCase : Union[str, Any]=5_12 , __lowerCamelCase : Tuple=0.2 , __lowerCamelCase : str=6 , __lowerCamelCase : int=7_68 , __lowerCamelCase : int=8 , __lowerCamelCase : List[Any]=20_48 , __lowerCamelCase : Union[str, Any]=0.2 , **__lowerCamelCase : Dict , ) -> Dict:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowercase = vocab_size
lowercase = max_pos
lowercase = enc_layers
lowercase = enc_hidden_size
lowercase = enc_heads
lowercase = enc_ff_size
lowercase = enc_dropout
lowercase = dec_layers
lowercase = dec_hidden_size
lowercase = dec_heads
lowercase = dec_ff_size
lowercase = dec_dropout
| 479
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
A_ = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
A_ = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def __UpperCAmelCase ( )-> Optional[int]:
"""simple docstring"""
lowercase = (
list(range(ord('''!''' ), ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ) )
)
lowercase = bs[:]
lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase )
cs.append(2**8 + n )
n += 1
lowercase = [chr(UpperCAmelCase ) for n in cs]
return dict(zip(UpperCAmelCase, UpperCAmelCase ) )
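# Note on the mapping built above: printable/latin-1 bytes map to themselves as
# characters, while every remaining byte value is assigned chr(256 + n) for a
# running counter n, so all 256 bytes get a distinct, visible, reversible
# unicode stand-in (e.g. byte 0x00 -> chr(256), 'Ā').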
def __UpperCAmelCase ( UpperCAmelCase )-> str:
"""simple docstring"""
lowercase = set()
lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase = char
return pairs
class __lowercase ( _A ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]="replace" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : int="</s>" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : List[str]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , __lowerCamelCase : str=False , **__lowerCamelCase : List[str] , ) -> Dict:
'''simple docstring'''
lowercase = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
lowercase = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
lowercase = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
lowercase = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
lowercase = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
lowercase = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase = json.load(__lowerCamelCase )
lowercase = {v: k for k, v in self.encoder.items()}
lowercase = errors # how to handle errors in decoding
lowercase = bytes_to_unicode()
lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
lowercase = merges_handle.read().split('''\n''' )[1:-1]
lowercase = [tuple(merge.split() ) for merge in bpe_merges]
lowercase = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
lowercase = {}
lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def __a ( self : int ) -> Any:
'''simple docstring'''
return len(self.encoder )
def __a ( self : List[str] ) -> Any:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : List[str] , __lowerCamelCase : Any ) -> List[str]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase = tuple(__lowerCamelCase )
lowercase = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
lowercase = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase ,lowercase = bigram
lowercase = []
lowercase = 0
while i < len(__lowerCamelCase ):
try:
lowercase = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase = tuple(__lowerCamelCase )
lowercase = new_word
if len(__lowerCamelCase ) == 1:
break
else:
lowercase = get_pairs(__lowerCamelCase )
lowercase = ''' '''.join(__lowerCamelCase )
lowercase = word
return word
def __a ( self : List[str] , __lowerCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = []
for token in re.findall(self.pat , __lowerCamelCase ):
lowercase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(''' ''' ) )
return bpe_tokens
def __a ( self : Optional[Any] , __lowerCamelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def __a ( self : List[str] , __lowerCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
return self.decoder.get(__lowerCamelCase )
def __a ( self : str , __lowerCamelCase : List[Any] ) -> Any:
'''simple docstring'''
lowercase = ''''''.join(__lowerCamelCase )
lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __a ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + '''\n''' )
lowercase = 0
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase = token_index
writer.write(''' '''.join(__lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __a ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def __a ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any]=False , **__lowerCamelCase : List[str] ) -> int:
'''simple docstring'''
lowercase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
lowercase = ''' ''' + text
return (text, kwargs)
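# Hypothetical usage sketch (standard slow-tokenizer API; the repo id is only
# an example, nothing in this file downloads it):
#
#   tok = BartTokenizer.from_pretrained("facebook/bart-base")
#   tok("Hello world")["input_ids"]  # byte-level BPE ids wrapped in <s> ... </s>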
| 479
| 1
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = CustomTokenizer
pass
| 70
|
# Algorithm for the pigeonhole sorting
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = min(lowercase ) # min() finds the minimum value
lowerCamelCase_ = max(lowercase ) # max() finds the maximum value
lowerCamelCase_ = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
lowerCamelCase_ = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowercase , lowercase ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
lowerCamelCase_ = 0
for count in range(lowercase ):
while holes[count] > 0:
holes[count] -= 1
lowerCamelCase_ = count + min_val
i += 1
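# Complexity note: pigeonhole sort runs in O(n + range) time with O(range)
# extra space, where range = max - min + 1, so it only pays off when the value
# range is close to the number of elements.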
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowercase )
# The list holds ints, so convert them to strings before joining.
print('Sorted order is:' , ' '.join(map(str , lowercase ) ) )
if __name__ == "__main__":
main()
| 70
| 1
|
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Any , *UpperCamelCase__ : Any , **UpperCamelCase__ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : str , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : str , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCamelCase__ : int , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
class lowercase__ ( metaclass=UpperCamelCase_):
UpperCamelCase_ = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Any ):
'''simple docstring'''
requires_backends(self , ['''sentencepiece'''] )
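# Pattern note: these placeholders keep `from transformers import ...` working
# when sentencepiece is not installed; instantiating any of them immediately
# raises an informative ImportError via requires_backends.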
| 702
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = FunnelTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def __A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE : int = '''unwanted, running'''
return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE : int = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase__ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
SCREAMING_SNAKE_CASE : int = tokenizer('''UNwant\u00E9d,running''' )
SCREAMING_SNAKE_CASE : Optional[Any] = len(inputs['''input_ids'''] ) - 1
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
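# Note: unlike BERT, Funnel assigns token_type_id 2 to the leading <cls> token,
# which is what the [2] + [0] * ... (+ [1] * ...) assertions above verify.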
| 34
| 0
|
from collections import namedtuple
SCREAMING_SNAKE_CASE : int = namedtuple("from_to", "from_ to")
SCREAMING_SNAKE_CASE : str = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00454, 264.172),
"cubicyard": from_to(0.76455, 1.30795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.000236588, 4226.75),
}
def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ """, """.join(_SCREAMING_SNAKE_CASE ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ """, """.join(_SCREAMING_SNAKE_CASE ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
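# Worked example (the public function name, volume_conversion, is assumed from
# the upstream module): converting 2 cubic metres to litres multiplies by
# from_(cubicmeter) = 1 and to(litre) = 1000, i.e.
#   volume_conversion(2, "cubicmeter", "litre")  # -> 2000.0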
| 635
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , __a ):
def A_ (self ) -> Tuple:
UpperCamelCase_ : Any = load_tool("""text-to-speech""" )
self.tool.setup()
def A_ (self ) -> Dict:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] = self.tool("""hey""" )
UpperCamelCase_ : Optional[Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def A_ (self ) -> Dict:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
UpperCamelCase_ : Any = self.tool("""hey""" )
UpperCamelCase_ : Tuple = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 635
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
SCREAMING_SNAKE_CASE = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = VOCAB_FILES_NAMES
lowerCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
lowerCAmelCase_ : Dict = TaTokenizer
lowerCAmelCase_ : List[int] = []
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="</s>" , lowerCAmelCase="<unk>" , lowerCAmelCase="<pad>" , lowerCAmelCase=100 , lowerCAmelCase=None , **lowerCAmelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCAmelCase_ = [f'''<extra_id_{i}>''' for i in range(lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCAmelCase_ = len(set(filter(lambda lowerCAmelCase : bool("extra_id_" in str(lowerCAmelCase ) ) , lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , extra_ids=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = False if not self.vocab_file else True
UpperCAmelCase_ = extra_ids
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
UpperCAmelCase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCAmelCase , )
return max_model_length
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ):
copyfile(self.vocab_file , lowerCAmelCase )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCAmelCase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A__ ( self ):
return list(
set(filter(lambda lowerCAmelCase : bool(re.search(r"<extra_id_\d+>" , lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def A__ ( self ):
return [self.convert_tokens_to_ids(lowerCAmelCase ) for token in self.get_sentinel_tokens()]
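# Usage sketch for the sentinel tokens exposed above: T5 span corruption pairs
# an input like "The <extra_id_0> walks in <extra_id_1> park" with a target
# like "<extra_id_0> cute dog <extra_id_1> the <extra_id_2>".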
| 704
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : int = ['input_ids', 'attention_mask']
lowerCAmelCase_ : str = DistilBertTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
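# Hypothetical usage sketch (standard fast-tokenizer API; the repo id is only
# an example):
#
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok("hello", "world")["input_ids"]  # [CLS] hello [SEP] world [SEP]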
| 23
| 0
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_lowerCamelCase = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
_lowerCamelCase = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
_lowerCamelCase = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
_lowerCamelCase = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
_lowerCamelCase = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
_lowerCamelCase = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
_lowerCamelCase = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ = randrange(len(__UpperCamelCase ) ), randrange(len(__UpperCamelCase ) )
UpperCAmelCase_ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
UpperCAmelCase_ , UpperCAmelCase_ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
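# The expected result above relies on an indexing trick: the two boolean
# comparisons sum to 0 (play < oppo), 1 (play == oppo) or 2 (play > oppo),
# selecting "Loss", "Tie" or "Win" respectively.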
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 100 ) -> Dict:
return (generate_random_hand() for _ in range(__UpperCamelCase ))
@pytest.mark.parametrize('''hand, expected''' , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Dict ) -> Dict:
assert PokerHand(__UpperCamelCase )._is_flush() == expected
@pytest.mark.parametrize('''hand, expected''' , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : str ) -> Any:
assert PokerHand(__UpperCamelCase )._is_straight() == expected
@pytest.mark.parametrize('''hand, expected, card_values''' , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] ) -> Optional[int]:
UpperCAmelCase_ = PokerHand(__UpperCamelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('''hand, expected''' , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ) -> Optional[Any]:
assert PokerHand(__UpperCamelCase )._is_same_kind() == expected
@pytest.mark.parametrize('''hand, expected''' , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] ) -> List[str]:
assert PokerHand(__UpperCamelCase )._hand_type == expected
@pytest.mark.parametrize('''hand, other, expected''' , __UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int ) -> Union[str, Any]:
assert PokerHand(__UpperCamelCase ).compare_with(PokerHand(__UpperCamelCase ) ) == expected
@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Any ) -> Union[str, Any]:
assert PokerHand(__UpperCamelCase ).compare_with(PokerHand(__UpperCamelCase ) ) == expected
def SCREAMING_SNAKE_CASE ( ) -> str:
UpperCAmelCase_ = [PokerHand(__UpperCamelCase ) for hand in SORTED_HANDS]
UpperCAmelCase_ = poker_hands.copy()
shuffle(__UpperCamelCase )
UpperCAmelCase_ = chain(sorted(__UpperCamelCase ) )
for index, hand in enumerate(__UpperCamelCase ):
assert hand == poker_hands[index]
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
# Test that five high straights are compared correctly.
UpperCAmelCase_ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
pokerhands.sort(reverse=__UpperCamelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
# Repeated calls to the five-high-straight check should keep returning True
# and must not mutate the cached card values after the first call.
UpperCAmelCase_ = PokerHand('''2C 4S AS 3D 5C''' )
UpperCAmelCase_ = True
UpperCAmelCase_ = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def SCREAMING_SNAKE_CASE ( ) -> str:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
UpperCAmelCase_ = 0
UpperCAmelCase_ = os.path.abspath(os.path.dirname(__UpperCamelCase ) )
UpperCAmelCase_ = os.path.join(__UpperCamelCase , '''poker_hands.txt''' )
with open(__UpperCamelCase ) as file_hand:
for line in file_hand:
UpperCAmelCase_ = line[:14].strip()
UpperCAmelCase_ = line[15:].strip()
UpperCAmelCase_ , UpperCAmelCase_ = PokerHand(__UpperCamelCase ), PokerHand(__UpperCamelCase )
UpperCAmelCase_ = player.compare_with(__UpperCamelCase )
if output == "Win":
answer += 1
assert answer == 376
| 144
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
    StableDiffusionDepth2ImgPipeline,
    StableDiffusionDiffEditPipeline,
    StableDiffusionPix2PixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class FlaxStableDiffusionPipelineOutput(BaseOutput):
    # Output class for Flax-based Stable Diffusion pipelines.
    images: np.ndarray
    nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
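

# The try/except blocks above follow a general optional-dependency pattern:
# probe for the dependency, raise the sentinel OptionalDependencyNotAvailable,
# and import dummy placeholders instead so `from ... import X` keeps working.
# A minimal self-contained sketch of the same idea (names are illustrative,
# not part of the diffusers API):
#
#   import importlib.util
#
#   def _is_available(package: str) -> bool:
#       return importlib.util.find_spec(package) is not None
#
#   if _is_available("torch"):
#       from .pipeline_real import MyPipeline  # real implementation
#   else:
#       class MyPipeline:  # dummy that fails loudly only when instantiated
#           def __init__(self, *args, **kwargs):
#               raise ImportError("MyPipeline requires `torch`: pip install torch")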
| 144
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from its JSON configuration
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
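    # Example invocation (script name and paths are illustrative):
    #   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./mobilebert/model.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./pytorch_model.bin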
| 532
|
'''simple docstring'''
class CircularQueue:
    # Circular FIFO queue with a fixed capacity, backed by a plain list.

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
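

# A minimal demonstration of the wrap-around behaviour of the queue above
# (values are illustrative):
if __name__ == "__main__":
    queue = CircularQueue(2)
    queue.enqueue("a").enqueue("b")
    assert queue.dequeue() == "a"
    queue.enqueue("c")  # reuses the slot freed by the dequeue above
    assert queue.dequeue() == "b" and queue.dequeue() == "c"
    assert queue.is_empty()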
| 532
| 1
|
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 556
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
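

# Minimal usage sketch (requires the transformers tools stack, torch, and
# Pillow; the image path is illustrative):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))
#   print(caption)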
| 518
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 705
|
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
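

# Sanity-check sketch for gen_gaussian_kernel (illustrative, not executed here):
#   k = gen_gaussian_kernel(3, sigma=1)
#   k.shape == (3, 3) and k[1, 1] == k.max()  # symmetric kernel, peaked at the center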
| 643
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 109
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 353
| 0
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
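

if __name__ == "__main__":
    # Tiny self-contained check (illustrative values): a 2x2 grayscale image with
    # pixels {10, 200} has mean 105, so thresholding maps it onto {0, 255}.
    demo = Image.new("L", (2, 2))
    demo.putdata([10, 10, 200, 200])
    assert sorted(set(mean_threshold(demo).getdata())) == [0, 255]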
| 713
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
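

if __name__ == "__main__":
    # Worked example of the mass-action law n * p = n_i**2 (illustrative numbers):
    # with electron_conc = 25 and intrinsic_conc = 10, the missing hole
    # concentration is 10**2 / 25 = 4.
    assert carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10) == ("hole_conc", 4.0)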
| 404
| 0
|
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 665
|
'''simple docstring'''
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
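

if __name__ == "__main__":
    # Worked example (classic minimum-cost-tickets instance): a 1-day pass costs
    # 2, a 7-day pass 7, a 30-day pass 15; the optimum for these travel days is
    # 2 + 7 + 2 = 11 (1-day pass on day 1, a 7-day pass covering days 4-10, then
    # a 1-day pass for day 20).
    assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11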
| 665
| 1
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
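

# Minimal usage sketch (checkpoint name and images are illustrative; RePaint
# needs an unconditional DDPM-style UNet plus the RePaint scheduler):
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   result = pipe(image=original_image, mask_image=mask, num_inference_steps=250)
#   result.images[0].save("inpainted.png")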
| 720
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: at each index, either skip the item or take it
    (when it still fits) and recurse on the remaining items.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
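

if __name__ == "__main__":
    # Small worked instance (illustrative): with capacity 5, taking the items of
    # weight 2 and 3 (values 3 and 4) is optimal, for a total value of 7.
    assert knapsack([1, 2, 3], [2, 3, 4], 3, 5, 0) == 7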
| 547
| 0
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to give XLNet / Transformer-XL more state for short prompts.
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
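

# Typical entry point is the `pipeline` factory rather than constructing the
# class directly (model name illustrative):
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"]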
| 165
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
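

# Launch sketch (flags illustrative): run through the accelerate CLI so the same
# script works on CPU, single or multiple GPUs, or TPU:
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2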
| 680
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
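# The mask generator below follows SimMIM: it picks `mask_ratio` of the coarse mask patches at random,
# then upsamples the resulting boolean grid to the model's patch resolution so it aligns with the patch embedding.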
class MaskGenerator:
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
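# The Trainer hands `collate_fn` a list of dataset examples; `bool_masked_pos` is the keyword argument
# that `AutoModelForMaskedImageModeling` models expect for the boolean patch mask.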
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
    )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
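# The tests below exercise GradientAccumulator both eagerly and under a two-replica MirroredStrategy on CPU.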
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :int = logging.get_logger(__name__)
a :List[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = """gpt_neox_japanese"""
def __init__( self , _a=32_000 , _a=2_560 , _a=32 , _a=32 , _a=4 , _a="gelu" , _a=1.00 , _a=10_000 , _a=2_048 , _a=0.02 , _a=1E-5 , _a=True , _a=31_996 , _a=31_999 , _a=0.1 , _a=0.0 , **_a , ) -> str:
"""simple docstring"""
super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_multiple_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = rotary_pct
SCREAMING_SNAKE_CASE__ : List[Any] = rotary_emb_base
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : str = use_cache
SCREAMING_SNAKE_CASE__ : str = attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
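# Full determinism matters here: the fast test below compares an exact 3x3 slice of the generated image.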
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
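# `attribute_map` below lets generic code read `config.hidden_size` etc. while the config itself stores the
# GPT-2-style attribute names (`n_embd`, `n_positions`, `n_head`, `n_layer`).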
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50_257, n_positions=1_024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=1_00, encoder_layers=6, encoder_ffn_dim=20_48, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=20_48, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=2_56, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
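        # DETR pairs a CNN backbone with a BART-style encoder-decoder; the arguments above group accordingly:
        # backbone settings first, then transformer hyper-parameters, then Hungarian-matcher costs and loss weights.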
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
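# `torch` is only imported under TYPE_CHECKING and inside each method below, so importing this module
# (and the `datasets` library with it) does not require torch to be installed.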
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column
    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
def solution(n: int = 4_00_00_00) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n (Project Euler problem 2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
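# Example: the even Fibonacci numbers up to 10 are 2 and 8, so solution(10) == 10.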
if __name__ == "__main__":
print(F"""{solution() = }""")
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List

from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path), no_input=True, extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py", f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py", f"{model_dir}/configuration_{lowercase_model_name}.py",
        )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
"""simple docstring"""
        text_generator = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
__lowerCAmelCase : Optional[Any] = text_generator("""This is a test""" , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
__lowerCAmelCase : List[str] = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
__lowerCAmelCase : List[Any] = text_generator("""This is a test""" , do_sample=lowerCAmelCase , num_return_sequences=2 , return_tensors=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{"""generated_token_ids""": ANY(lowerCAmelCase )},
{"""generated_token_ids""": ANY(lowerCAmelCase )},
] , )
__lowerCAmelCase : List[Any] = text_generator.model.config.eos_token_id
__lowerCAmelCase : str = """<pad>"""
__lowerCAmelCase : Any = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase , )
self.assertEqual(
lowerCAmelCase , [
[
{"""generated_token_ids""": ANY(lowerCAmelCase )},
{"""generated_token_ids""": ANY(lowerCAmelCase )},
],
[
{"""generated_token_ids""": ANY(lowerCAmelCase )},
{"""generated_token_ids""": ANY(lowerCAmelCase )},
],
] , )
@require_tf
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
        text_generator = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
__lowerCAmelCase : str = text_generator("""This is a test""" , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
__lowerCAmelCase : str = text_generator(["""This is a test""", """This is a second test"""] , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline( self : Dict , model : Tuple , tokenizer : Union[str, Any] , processor : Tuple ) -> Dict:
        """simple docstring"""
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
"""simple docstring"""
__lowerCAmelCase : Tuple = """Hello I believe in"""
__lowerCAmelCase : Optional[int] = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
__lowerCAmelCase : Optional[int] = text_generator(lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
__lowerCAmelCase : Optional[Any] = text_generator(lowerCAmelCase , stop_sequence=""" fe""" )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe"""}] )
    def run_pipeline_test( self : str , text_generator , _ ) -> List[str]:
        """simple docstring"""
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("""This is a test""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
        outputs = text_generator("""This is a test""" , return_full_text=False )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        text_generator = pipeline(task="""text-generation""" , model=model , tokenizer=tokenizer , return_full_text=False )
        outputs = text_generator("""This is a test""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        outputs = text_generator("""This is a test""" , return_full_text=True )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
__lowerCAmelCase : Any = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
__lowerCAmelCase : Dict = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
[{"""generated_text""": ANY(lowerCAmelCase )}, {"""generated_text""": ANY(lowerCAmelCase )}],
] , )
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[Any] = text_generator("""test""" , return_full_text=lowerCAmelCase , return_text=lowerCAmelCase )
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[Any] = text_generator("""test""" , return_full_text=lowerCAmelCase , return_tensors=lowerCAmelCase )
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[str] = text_generator("""test""" , return_text=lowerCAmelCase , return_tensors=lowerCAmelCase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
__lowerCAmelCase : Union[str, Any] = text_generator("""""" )
self.assertEqual(lowerCAmelCase , [{"""generated_text""": ANY(lowerCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
__lowerCAmelCase : Union[str, Any] = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
__lowerCAmelCase : int = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_00 , max_new_tokens=20 )
__lowerCAmelCase : Dict = text_generator("""This is a test""" * 5_00 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowerCAmelCase ):
text_generator(
"""This is a test""" * 5_00 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
__lowerCAmelCase : Tuple = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
__lowerCAmelCase : Dict = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
__lowerCAmelCase : int = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
__lowerCAmelCase : Dict = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
__lowerCAmelCase : str = pipe("""This is a test""" )
self.assertEqual(
lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
"""simple docstring"""
import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.float16 )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.float16 )
        pipe("""This is a test""" , do_sample=True , top_p=0.5 )
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = """Hello world"""
__lowerCAmelCase : Dict = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
__lowerCAmelCase : Optional[Any] = logging.get_logger("""transformers.generation.tf_utils""" )
else:
__lowerCAmelCase : Optional[Any] = logging.get_logger("""transformers.generation.utils""" )
__lowerCAmelCase : Dict = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(lowerCAmelCase ) as cl:
__lowerCAmelCase : Optional[Any] = text_generator(lowerCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(lowerCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(lowerCAmelCase ) as cl:
__lowerCAmelCase : Dict = text_generator(lowerCAmelCase , max_new_tokens=1 )
self.assertNotIn(lowerCAmelCase , cl.out )
with CaptureLogger(lowerCAmelCase ) as cl:
__lowerCAmelCase : str = text_generator(lowerCAmelCase , max_length=10 )
self.assertNotIn(lowerCAmelCase , cl.out )
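# --- Illustrative sketch (added; not part of the test suite) ---
# Minimal direct usage of the pipeline exercised above; the model id comes
# from the tests themselves, everything else is a plain `transformers` call:
#
# from transformers import pipeline
# text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
# outputs = text_generator("This is a test", do_sample=False)  # do_sample=False => deterministic output
# print(outputs[0]["generated_text"])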
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path , articles: list ) -> None:
    content = """\n""".join(articles )
    Path(path ).open("""w""" ).writelines(content )
T5_TINY = """patrickvonplaten/t5-tiny-random"""
BART_TINY = """sshleifer/bart-tiny-random"""
MBART_TINY = """sshleifer/tiny-mbart"""
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE ( TestCasePlus ):
    """simple docstring"""
    def run_eval_tester( self : Union[str, Any] , model : Tuple ) -> Optional[Any]:
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        articles = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = f'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys , """argv""" , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        self.run_eval_tester(T5_TINY )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def test_run_eval_slow( self : int , model : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        self.run_eval_tester(model )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def test_run_eval_search( self : int , model : Optional[Any] ) -> Any:
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        output_file_name = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        text = {
            """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
            """de""": [
                """Maschinelles Lernen ist großartig, oder?""",
                """Ich esse gerne Bananen""",
                """Morgen ist wieder ein toller Tag!""",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / """scores.json""" )
        reference_path = str(tmp_dir / """val.target""" )
        _dump_articles(input_file_name , text["""en"""] )
        _dump_articles(reference_path , text["""de"""] )
        task = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        testargs = f'''
            run_eval_search.py
            {model}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
        with patch.object(sys , """argv""" , testargs ):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [""" num_beams | length_penalty""", model, """Best score args"""]
            un_expected_strings = ["""Info"""]
            if "translation" in task:
                expected_strings.append("""bleu""" )
            else:
                expected_strings.extend(ROUGE_KEYS )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(score_path ).exists()
            os.remove(Path(score_path ) )
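# --- Illustrative sketch (added; not part of the test suite) ---
# The tests drive the scripts by patching sys.argv; an equivalent manual
# invocation (paths are placeholders, not from the source) would be roughly:
#
#   python run_eval_search.py sshleifer/tiny-mbart input.source output.txt \
#       --score_path scores.json --reference_path val.target --task summarization \
#       --search "num_beams=1:2 length_penalty=0.9:1.0"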
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
def __init__( self : Optional[int] , _A : WhisperForConditionalGeneration , _A : WhisperProcessor , _A : AutoencoderKL , _A : CLIPTextModel , _A : CLIPTokenizer , _A : UNetaDConditionModel , _A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _A : StableDiffusionSafetyChecker , _A : CLIPImageProcessor , ) -> str:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=_A , speech_processor=_A , vae=_A , text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , feature_extractor=_A , )
def UpperCAmelCase_ ( self : Dict , _A : Optional[Union[str, int]] = "auto" ) -> int:
"""simple docstring"""
if slice_size == "auto":
snake_case_ : Dict = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_A )
def UpperCAmelCase_ ( self : int ) -> Dict:
"""simple docstring"""
self.enable_attention_slicing(_A )
@torch.no_grad()
def __call__( self : List[str] , _A : Tuple , _A : List[str]=16000 , _A : int = 512 , _A : int = 512 , _A : int = 50 , _A : float = 7.5 , _A : Optional[Union[str, List[str]]] = None , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[torch.Generator] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _A : int = 1 , **_A : int , ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = self.speech_processor.feature_extractor(
_A , return_tensors='pt' , sampling_rate=_A ).input_features.to(self.device )
snake_case_ : Union[str, Any] = self.speech_model.generate(_A , max_length=480000 )
snake_case_ : int = self.speech_processor.tokenizer.batch_decode(_A , skip_special_tokens=_A , normalize=_A )[
0
]
if isinstance(_A , _A ):
snake_case_ : int = 1
elif isinstance(_A , _A ):
snake_case_ : Any = len(_A )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(_A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_A )}.""" )
# get prompt text embeddings
snake_case_ : int = self.tokenizer(
_A , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
snake_case_ : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
snake_case_ : int = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ : Tuple = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
snake_case_ : Union[str, Any] = text_embeddings.repeat(1 , _A , 1 )
snake_case_ : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , _A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case_ : List[str]
if negative_prompt is None:
snake_case_ : Dict = [''] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !="""
F""" {type(_A )}.""" )
elif isinstance(_A , _A ):
snake_case_ : Tuple = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
snake_case_ : Optional[Any] = negative_prompt
snake_case_ : Any = text_input_ids.shape[-1]
snake_case_ : Dict = self.tokenizer(
_A , padding='max_length' , max_length=_A , truncation=_A , return_tensors='pt' , )
snake_case_ : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ : Dict = uncond_embeddings.shape[1]
snake_case_ : Dict = uncond_embeddings.repeat(1 , _A , 1 )
snake_case_ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , _A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case_ : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case_ : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case_ : str = torch.randn(_A , generator=_A , device='cpu' , dtype=_A ).to(
self.device )
else:
snake_case_ : int = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case_ : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case_ : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ : Union[str, Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ : Optional[int] = {}
if accepts_eta:
snake_case_ : Tuple = eta
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ : Optional[Any] = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
snake_case_ : str = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case_ ,snake_case_ : Optional[int] = noise_pred.chunk(2 )
snake_case_ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Any = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A , _A )
snake_case_ : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents
snake_case_ : int = self.vae.decode(_A ).sample
snake_case_ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ : Optional[int] = self.numpy_to_pil(_A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
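# --- Illustrative sketch (added; not part of the pipeline above) ---
# Rough usage outline, assuming all components are already instantiated; the
# variable names and the audio source are assumptions, not from the source:
#
# pipe = SpeechToImagePipeline(
#     speech_model=whisper_model, speech_processor=whisper_processor,
#     vae=vae, text_encoder=text_encoder, tokenizer=tokenizer,
#     unet=unet, scheduler=scheduler, safety_checker=safety_checker,
#     feature_extractor=feature_extractor,
# ).to("cuda")
# output = pipe(raw_audio_array, sampling_rate=16000, num_inference_steps=50)
# output.images[0].save("speech_to_image.png")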
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split('.' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""" )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""" )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""" )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""" )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
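# --- Illustrative sketch (added) ---
# Hypothetical command line for the converter above (script name and paths are
# placeholders):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old_checkpoint \
#       --pytorch_dump_folder_path ./prophetnet_converted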
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase_ ,R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' ,)
class FillMaskPipeline( Pipeline ):
    def get_masked_index( self : Optional[Any] , input_ids : GenericTensor):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False)
        else:
            raise ValueError('''Unsupported framework''')
        return masked_index
    def _ensure_exactly_one_mask_token( self : Tuple , input_ids : GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def ensure_exactly_one_mask_token( self : str , model_inputs : GenericTensor):
        if isinstance(model_inputs , list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess( self : Optional[Any] , inputs : Union[str, Any] , return_tensors : Optional[int]=None , **preprocess_parameters : List[str]):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward( self : List[str] , model_inputs : str):
        model_outputs = self.model(**model_inputs)
        model_outputs['''input_ids'''] = model_inputs['''input_ids''']
        return model_outputs
    def postprocess( self : str , model_outputs : str , top_k : int=5 , target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['''input_ids'''][0]
        outputs = model_outputs['''logits''']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0) , target_ids.reshape(-1 , 1))
                probs = tf.expand_dims(probs , 0)
            topk = tf.math.top_k(probs , k=top_k)
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
            row = []
            for v, p in zip(_values , _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask)
                proposition = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
                row.append(proposition)
            result.append(row)
if single_mask:
return result[0]
return result
    def get_target_ids( self : int , targets : Any , top_k : List[Any]=None):
        if isinstance(targets , str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['''input_ids''']
                if len(input_ids) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        '''We cannot replace it with anything meaningful, ignoring it''')
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('''At least one target must be provided when passed.''')
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters( self : List[Any] , top_k : Optional[int]=None , targets : Tuple=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k)
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''')
        return {}, {}, postprocess_params
    def __call__( self : str , inputs : Tuple , *args : Dict , **kwargs : Optional[Any]):
        outputs = super().__call__(inputs , **kwargs)
        if isinstance(inputs , list) and len(inputs) == 1:
            return outputs[0]
        return outputs
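# --- Illustrative sketch (added; not part of the pipeline class) ---
# Minimal usage through the high-level factory; the model id is an assumption:
#
# from transformers import pipeline
# fill_mask = pipeline("fill-mask", model="bert-base-uncased")
# for prediction in fill_mask("Paris is the [MASK] of France.", top_k=3):
#     print(prediction["token_str"], round(prediction["score"], 3))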
from typing import List
from .keymap import KEYMAP, get_character
def mark( key : str ):
    def decorator(func : Union[str, Any] ):
        handle = getattr(func ,'''handle_key''' ,[] )
        handle += [key]
        setattr(func ,'''handle_key''' ,handle )
        return func
    return decorator
def mark_multiple( *keys : List[str] ):
    def decorator(func : List[str] ):
        handle = getattr(func ,'''handle_key''' ,[] )
        handle += keys
        setattr(func ,'''handle_key''' ,handle )
        return func
    return decorator
class KeyHandler( type ):
    def __new__( cls : Optional[Any] , name : Optional[int] , bases : Optional[Any] , attrs : Tuple):
        new_cls = super().__new__(cls , name , bases , attrs)
        if not hasattr(new_cls , '''key_handler'''):
            setattr(new_cls , '''key_handler''' , {})
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls : List[str]):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register( cls : List[str] ):
    return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
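# --- Illustrative sketch (added; not part of the module above) ---
# A hypothetical menu class wired through the metaclass: `register` rebuilds
# the class with KeyHandler, which collects every method decorated with
# `mark`/`mark_multiple` into `key_handler`. The KEYMAP entries used here are
# assumptions about that module's contents:
#
# @register
# class BulletMenu:
#     @mark(KEYMAP["up"])
#     def go_up(cls):
#         print("up")
#
#     @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
#     def go_down(cls):
#         print("down")
#
# # BulletMenu.handle_input reads one key via get_character() and dispatches
# # through BulletMenu.key_handler; it returns None for unhandled keys.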
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int] ):  # This function is recursive
    """simple docstring"""
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
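# --- Illustrative sketch (added) ---
# Example run of the recursive search above (input made up for illustration):
#
#   longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
#   -> [10, 22, 33, 41, 60, 80]   # one longest non-decreasing subsequence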
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_owlvit"""] = ['''OwlViTFeatureExtractor''']
    _import_structure["""image_processing_owlvit"""] = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_owlvit"""] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
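# --- Illustrative sketch (added; simplified assumption about _LazyModule) ---
# The import structure above defers heavy submodule imports until a symbol is
# first accessed. A stripped-down stand-in for the same idea:
#
# import importlib
# import types
#
# class LazyModuleSketch(types.ModuleType):
#     def __init__(self, name, import_structure):
#         super().__init__(name)
#         self._import_structure = import_structure  # {submodule: [symbol, ...]}
#     def __getattr__(self, attr):
#         for submodule, symbols in self._import_structure.items():
#             if attr in symbols:
#                 module = importlib.import_module(f".{submodule}", self.__name__)
#                 return getattr(module, attr)
#         raise AttributeError(attr)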
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
UpperCAmelCase : Optional[int] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
UpperCAmelCase : List[str] = "main"
# Default branch name
UpperCAmelCase : List[str] = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
UpperCAmelCase : Tuple = "aaaaaaa"
# This commit does not exist, so we should 404.
UpperCAmelCase : Union[str, Any] = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
UpperCAmelCase : Any = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en() -> Tuple:
'''simple docstring'''
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def context_fr() -> Tuple:
'''simple docstring'''
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""") is not None
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO)
    def _UpperCAmelCase ( self : Dict , mock_stdout : int):
"""simple docstring"""
with ContextManagers([]):
print("""Transformers are awesome!""")
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""")
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO)
    def _UpperCAmelCase ( self : List[Any] , mock_stdout : Any):
"""simple docstring"""
with ContextManagers([context_en()]):
print("""Transformers are awesome!""")
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""")
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO)
    def _UpperCAmelCase ( self : str , mock_stdout : Optional[int]):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()]):
print("""Transformers are awesome!""")
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""")
@require_torch
    def test_find_labels_pt( self : Union[str, Any]):
        """simple docstring"""
        self.assertEqual(find_labels(BertForSequenceClassification) , ["""labels"""])
        self.assertEqual(find_labels(BertForPreTraining) , ["""labels""", """next_sentence_label"""])
        self.assertEqual(find_labels(BertForQuestionAnswering) , ["""start_positions""", """end_positions"""])
        class DummyModel( BertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel) , ["""labels"""])
@require_tf
    def test_find_labels_tf( self : str):
        """simple docstring"""
        self.assertEqual(find_labels(TFBertForSequenceClassification) , ["""labels"""])
        self.assertEqual(find_labels(TFBertForPreTraining) , ["""labels""", """next_sentence_label"""])
        self.assertEqual(find_labels(TFBertForQuestionAnswering) , ["""start_positions""", """end_positions"""])
        class DummyModel( TFBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel) , ["""labels"""])
@require_flax
    def test_find_labels_flax( self : int):
        """simple docstring"""
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification) , [])
        self.assertEqual(find_labels(FlaxBertForPreTraining) , [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering) , [])
        class DummyModel( FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel) , [])
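# --- Illustrative sketch (added; simplified assumption, not the real helper) ---
# find_labels inspects the signature of the model's forward/call method for
# label-like arguments, which is why the subclasses above inherit the result:
#
# import inspect
# def find_labels_sketch(model_class):
#     method = getattr(model_class, "forward", None) or getattr(model_class, "call", model_class.__call__)
#     return [p for p in inspect.signature(method).parameters if "label" in p]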
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
UpperCAmelCase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]="<s>" , lowerCAmelCase_ : Union[str, Any]="</s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Union[str, Any]="<pad>" , lowerCAmelCase_ : Dict="<mask>" , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ):
"""simple docstring"""
lowercase_ = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_) else mask_token
lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase_))
lowercase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowercase_ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ = 1
lowercase_ = len(self.sp_model)
lowercase_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase_)
}
lowercase_ = {v: k for k, v in self.lang_code_to_id.items()}
lowercase_ = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
lowercase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase_ = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
lowercase_ = src_lang if src_lang is not None else """en_XX"""
lowercase_ = self.lang_code_to_id[self._src_lang]
lowercase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Dict):
"""simple docstring"""
lowercase_ = self.__dict__.copy()
lowercase_ = None
lowercase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , lowerCAmelCase_ : int):
"""simple docstring"""
lowercase_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
lowercase_ = {}
lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def _UpperCAmelCase ( self : str):
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Any):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
lowercase_ = src_lang
lowercase_ = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = self.convert_tokens_to_ids(lowerCAmelCase_)
lowercase_ = tgt_lang_id
return inputs
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = {self.convert_ids_to_tokens(lowerCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : str):
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Optional[int]):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ = self.sp_model.PieceToId(lowerCAmelCase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any]):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _UpperCAmelCase ( self : str , lowerCAmelCase_ : Tuple):
"""simple docstring"""
lowercase_ = """""".join(lowerCAmelCase_).replace(lowerCAmelCase_ , """ """).strip()
return out_string
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase_ = os.path.join(
lowerCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase_ , """wb""") as fi:
lowercase_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_)
return (out_vocab_file,)
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
lowercase_ = src_lang
lowercase_ = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_)
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def _UpperCAmelCase ( self : Optional[int] , src_lang : Union[str, Any]):
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def _UpperCAmelCase ( self : Optional[int] , lang : str):
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
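# Minimal usage sketch for the language-code helpers above. The checkpoint and
# concrete tokenizer class are illustrative assumptions, not part of this file:
# tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tokenizer.prepare_seq2seq_batch(["Hello"], src_lang="en_XX", tgt_texts=["Buna"], tgt_lang="ro_RO")
# Source encoding runs with suffix tokens [eos, src_lang_code]; switching to
# target mode swaps in [eos, tgt_lang_code] via the two setters above.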
| 567
| 1
|
import math
import sys
def A_ ( number : int ) -> int:
    '''simple docstring'''
    if number != int(number ):
        raise ValueError('''the value of input must be a natural number''' )
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''' )
    if number == 0:
        return 1
    # answers[i] holds the fewest perfect squares that sum to i (bottom-up DP)
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer , answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
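# Quick sanity check of the dynamic programme above, worked out by hand:
# 12 = 4 + 4 + 4 needs three perfect squares, while 13 = 9 + 4 needs two.
if __name__ == "__main__":
    assert A_(12) == 3
    assert A_(13) == 2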
| 701
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'table-transformer'
_snake_case = ['past_key_values']
_snake_case = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=2048 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="sine" , SCREAMING_SNAKE_CASE_="resnet50" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 , **SCREAMING_SNAKE_CASE_ , )-> Tuple:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = backbone_config.get('''model_type''' )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
__UpperCamelCase = use_timm_backbone
__UpperCamelCase = backbone_config
__UpperCamelCase = num_channels
__UpperCamelCase = num_queries
__UpperCamelCase = d_model
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = init_xavier_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = encoder_layers
__UpperCamelCase = auxiliary_loss
__UpperCamelCase = position_embedding_type
__UpperCamelCase = backbone
__UpperCamelCase = use_pretrained_backbone
__UpperCamelCase = dilation
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = mask_loss_coefficient
__UpperCamelCase = dice_loss_coefficient
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ ( self )-> int:
'''simple docstring'''
return self.d_model
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = version.parse('1.11' )
@property
def A__ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A__ ( self )-> float:
'''simple docstring'''
return 1E-5
@property
def A__ ( self )-> int:
'''simple docstring'''
return 12
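# Minimal sketch of how the ONNX export description above is typically consumed
# (the concrete class names are assumed from context, not defined in this excerpt):
# config = TableTransformerConfig()
# onnx_config = TableTransformerOnnxConfig(config)
# onnx_config.inputs              # pixel_values / pixel_mask with dynamic axes
# onnx_config.atol_for_validation # 1e-5
# onnx_config.default_onnx_opset  # 12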
| 451
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class snake_case__ ( snake_case__ ):
"""simple docstring"""
lowerCamelCase = ["input_features", "is_longer"]
def __init__( self : int , UpperCamelCase__ : Tuple=64 , UpperCamelCase__ : List[str]=4_8000 , UpperCamelCase__ : List[str]=480 , UpperCamelCase__ : List[str]=10 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Dict=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
feature_size=snake_case__ , sampling_rate=snake_case__ , padding_value=snake_case__ , return_attention_mask=snake_case__ , **snake_case__ , )
snake_case : Union[str, Any] = top_db
snake_case : List[str] = truncation
snake_case : Tuple = padding
snake_case : Any = fft_window_size
snake_case : Optional[Any] = (fft_window_size >> 1) + 1
snake_case : Union[str, Any] = hop_length
snake_case : Dict = max_length_s
snake_case : Dict = max_length_s * sampling_rate
snake_case : List[str] = sampling_rate
snake_case : Dict = frequency_min
snake_case : int = frequency_max
snake_case : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case__ , min_frequency=snake_case__ , max_frequency=snake_case__ , sampling_rate=snake_case__ , norm=snake_case__ , mel_scale='''htk''' , )
snake_case : Optional[int] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case__ , min_frequency=snake_case__ , max_frequency=snake_case__ , sampling_rate=snake_case__ , norm='''slaney''' , mel_scale='''slaney''' , )
def lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
snake_case : List[Any] = copy.deepcopy(self.__dict__ )
snake_case : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features( self : int , waveform : np.array , mel_filters : Optional[np.array] = None ) -> np.ndarray:
        """simple docstring"""
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='''dB''' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self : Any , mel : Any , total_frames : int , chunk_frames : int ) -> Tuple:
        """simple docstring"""
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
def lowerCAmelCase ( self : List[Any] , UpperCamelCase__ : np.array , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
snake_case : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
snake_case : str = len(snake_case__ ) - max_length
snake_case : List[Any] = np.random.randint(0 , overflow + 1 )
snake_case : Tuple = waveform[idx : idx + max_length]
snake_case : Optional[Any] = self._np_extract_fbank_features(snake_case__ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
snake_case : Tuple = self._np_extract_fbank_features(snake_case__ , self.mel_filters )
snake_case : Optional[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
snake_case : Tuple = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
snake_case : Optional[int] = np.stack([mel, mel, mel, mel] , axis=0 )
snake_case : str = False
else:
snake_case : List[Any] = self._random_mel_fusion(snake_case__ , snake_case__ , snake_case__ )
snake_case : List[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
snake_case : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
snake_case : Optional[int] = int(max_length / len(snake_case__ ) )
snake_case : int = np.stack(np.tile(snake_case__ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
snake_case : str = int(max_length / len(snake_case__ ) )
snake_case : Optional[int] = np.stack(np.tile(snake_case__ , snake_case__ ) )
snake_case : Optional[Any] = np.pad(snake_case__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
snake_case : Any = self._np_extract_fbank_features(snake_case__ , self.mel_filters )
snake_case : int = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
snake_case : str = self._np_extract_fbank_features(snake_case__ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Optional[Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Tuple:
"""simple docstring"""
snake_case : Optional[int] = truncation if truncation is not None else self.truncation
snake_case : Union[str, Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
snake_case : Optional[Any] = isinstance(snake_case__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
snake_case : Optional[int] = is_batched_numpy or (
isinstance(snake_case__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case : Tuple = [np.asarray(snake_case__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case__ , np.ndarray ):
snake_case : Union[str, Any] = np.asarray(snake_case__ , dtype=np.floataa )
elif isinstance(snake_case__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case : Any = [np.asarray(snake_case__ )]
# convert to mel spectrogram, truncate and pad if needed.
snake_case : Union[str, Any] = [
self._get_input_mel(snake_case__ , max_length if max_length else self.nb_max_samples , snake_case__ , snake_case__ )
for waveform in raw_speech
]
snake_case : str = []
snake_case : Optional[int] = []
for mel, longer in padded_inputs:
input_mel.append(snake_case__ )
is_longer.append(snake_case__ )
if truncation == "fusion" and sum(snake_case__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
snake_case : int = np.random.randint(0 , len(snake_case__ ) )
snake_case : Union[str, Any] = True
if isinstance(input_mel[0] , snake_case__ ):
snake_case : Union[str, Any] = [np.asarray(snake_case__ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
snake_case : Tuple = [[longer] for longer in is_longer]
snake_case : Any = {"input_features": input_mel, "is_longer": is_longer}
snake_case : Optional[int] = BatchFeature(snake_case__ )
if return_tensors is not None:
snake_case : int = input_features.convert_to_tensors(snake_case__ )
return input_features
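# Minimal usage sketch for the feature extractor above. The class name and the
# synthetic 48 kHz mono waveform are assumptions, not part of this excerpt:
# import numpy as np
# extractor = ClapFeatureExtractor()
# waveform = np.random.randn(10 * 48_000)  # 10 seconds of noise
# features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
# features["input_features"] holds the (possibly fused) mel spectrograms and
# features["is_longer"] flags clips that exceeded max_length_s.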
| 638
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ) -> Tuple:
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_4694
        config.cell_selection_preference = 0.20_7951
        config.huber_loss_delta = 0.12_1194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.035_2513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.90_3421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_3141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F"""Task {task} not supported.""" )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A__ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
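# Example invocation (script name and paths are placeholders):
# python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/bert_config.json \
#     --pytorch_dump_path /path/to/output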
| 153
| 0
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.0_2 , use_labels=True , scope=None , ) -> List[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self ) -> Union[str, Any]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self ) -> str:
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ) -> str:
SCREAMING_SNAKE_CASE : Union[str, Any] = BertGenerationEncoder(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowercase__ , attention_mask=lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ) -> Tuple:
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : List[str] = BertGenerationEncoder(config=lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
SCREAMING_SNAKE_CASE : int = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ) -> int:
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Tuple = BertGenerationDecoder(config=lowercase__ ).to(lowercase__ ).eval()
# first forward pass
SCREAMING_SNAKE_CASE : List[Any] = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , use_cache=lowercase__ , )
SCREAMING_SNAKE_CASE : List[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE : Any = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , output_hidden_states=lowercase__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE : List[str] = model(
lowercase__ , attention_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , past_key_values=lowercase__ , output_hidden_states=lowercase__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : int = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
def _UpperCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , ) -> str:
SCREAMING_SNAKE_CASE : Optional[int] = BertGenerationDecoder(lowercase__ )
model.to(lowercase__ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ) -> List[str]:
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Dict = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
snake_case__ : Tuple = (BertGenerationDecoder,) if is_torch_available() else ()
snake_case__ : str = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = BertGenerationEncoderTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowercase__ , hidden_size=37 )
def _UpperCamelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def _UpperCamelCase ( self ) -> Tuple:
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase__ )
def _UpperCamelCase ( self ) -> int:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowercase__ )
@slow
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE : Any = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(lowercase__ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
SCREAMING_SNAKE_CASE : int = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowercase__ )[0]
SCREAMING_SNAKE_CASE : Any = torch.Size([1, 8, 1_024] )
self.assertEqual(output.shape , lowercase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1E-4 ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> Any:
SCREAMING_SNAKE_CASE : List[Any] = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowercase__ )[0]
SCREAMING_SNAKE_CASE : List[Any] = torch.Size([1, 8, 50_358] )
self.assertEqual(output.shape , lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1E-4 ) )
| 179
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCAmelCase :int = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase :int = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
_lowerCAmelCase :str = {
"""allenai/led-base-16384""": 16_384,
}
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case__ : Optional[Any] = VOCAB_FILES_NAMES
snake_case__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : int = LEDTokenizer
snake_case__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="replace" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=False , lowercase__=True , **lowercase__ , ) -> str:
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : List[Any] = getattr(lowercase__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Dict = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : Dict = 'post_processor'
SCREAMING_SNAKE_CASE : Dict = getattr(self.backend_tokenizer , lowercase__ , lowercase__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Tuple = tuple(state['cls'] )
SCREAMING_SNAKE_CASE : str = False
if state.get('add_prefix_space' , lowercase__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Dict = add_prefix_space
SCREAMING_SNAKE_CASE : Optional[int] = True
if state.get('trim_offsets' , lowercase__ ) != trim_offsets:
SCREAMING_SNAKE_CASE : Optional[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Optional[Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : str = getattr(lowercase__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE : Dict = component_class(**lowercase__ )
setattr(self.backend_tokenizer , lowercase__ , lowercase__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _UpperCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase ( self , lowercase__ ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else value
SCREAMING_SNAKE_CASE : Union[str, Any] = value
def _UpperCamelCase ( self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE : int = kwargs.get('is_split_into_words' , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
SCREAMING_SNAKE_CASE : Dict = kwargs.get('is_split_into_words' , lowercase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__=None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self , lowercase__ , lowercase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = PaddingStrategy.DO_NOT_PAD , lowercase__ = None , lowercase__ = None , ) -> dict:
SCREAMING_SNAKE_CASE : int = super()._pad(
encoded_inputs=lowercase__ , max_length=lowercase__ , padding_strategy=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : List[str] = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : str = len(encoded_inputs['global_attention_mask'] ) != len(lowercase__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : Dict = len(lowercase__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Tuple = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
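# Sketch of how the `global_attention_mask` padding above is used with LED
# (variable names here are illustrative, not from this file). LED expects a
# `global_attention_mask` where 1 marks tokens with global attention and 0
# local attention; the branch above fills padded positions with -1, since 0
# already means "local attention" rather than "do not attend":
# enc = led_tokenizer(long_text, return_tensors="pt", padding="max_length", max_length=4096)
# enc["global_attention_mask"] = torch.zeros_like(enc["input_ids"])
# enc["global_attention_mask"][:, 0] = 1  # global attention on the first token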
| 179
| 1
|
def solution( n = 100 )-> int:
    '''simple docstring'''
    # By Nicomachus's identity, (n * (n + 1) // 2) ** 2 is the sum of the first
    # n cubes, which also equals the square of the sum 1 + 2 + ... + n.
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
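# Worked check for n = 10: the square of the sum is 55**2 = 3025 and the sum of
# the squares is 385, so solution(10) must return 3025 - 385 = 2640.
if __name__ == "__main__":
    assert solution(10) == 2640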
| 393
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level( )-> int:
    '''simple docstring'''
    env_level_str = os.getenv('''DATASETS_VERBOSITY''' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
    return _default_log_level
def _get_library_name( )-> str:
    '''simple docstring'''
    return __name__.split('''.''' )[0]
def _get_library_root_logger( )-> logging.Logger:
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger( )-> None:
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger( )-> None:
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger( name = None )-> logging.Logger:
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity( )-> int:
    '''simple docstring'''
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( verbosity )-> None:
    '''simple docstring'''
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info( )-> None:
    '''simple docstring'''
    return set_verbosity(INFO )
def set_verbosity_warning( )-> None:
    '''simple docstring'''
    return set_verbosity(WARNING )
def set_verbosity_debug( )-> None:
    '''simple docstring'''
    return set_verbosity(DEBUG )
def set_verbosity_error( )-> None:
    '''simple docstring'''
    return set_verbosity(ERROR )
def disable_propagation( )-> None:
    '''simple docstring'''
    _get_library_root_logger().propagate = False
def enable_propagation( )-> None:
    '''simple docstring'''
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm :
    '''simple docstring'''
    def __init__( self : Tuple , *args : Union[str, Any] , **kwargs : str ) -> Optional[int]: # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__( self : Tuple ) -> Optional[Any]:
        """simple docstring"""
        return iter(self._iterator )
    def __getattr__( self : Union[str, Any] , name : Union[str, Any] ) -> int:
        """simple docstring"""
        def empty_fn(*args : List[Any] , **kwargs : Union[str, Any] ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self : str ) -> List[str]:
        """simple docstring"""
        return self
    def __exit__( self : List[Any] , type_ : Dict , value : List[str] , traceback : Dict ) -> Dict:
        """simple docstring"""
        return
_tqdm_active = True
class _tqdm_cls :
    '''simple docstring'''
    def __call__( self : Union[str, Any] , *args : Optional[Any] , disable : bool = False , **kwargs : Tuple ) -> Tuple:
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self : int , *args : Union[str, Any] , **kwargs : Optional[int] ) -> List[str]:
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( )-> bool:
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar( )-> None:
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar( )-> None:
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
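# Minimal usage sketch for the helpers above (the names match the upstream
# `datasets.utils.logging` module that this file mirrors):
# set_verbosity_error()     # silence everything below ERROR
# disable_propagation()     # stop records leaking to the root logger
# disable_progress_bar()    # tqdm wrappers above become EmptyTqdm no-ops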
| 393
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def A__ ( UpperCamelCase__ , UpperCamelCase__="facebook/mbart-large-en-ro" , UpperCamelCase__=False , UpperCamelCase__=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_SCREAMING_SNAKE_CASE = MBartConfig.from_pretrained(UpperCamelCase__ , vocab_size=UpperCamelCase__ )
if mbart_aa and finetuned:
_SCREAMING_SNAKE_CASE = '''relu'''
_SCREAMING_SNAKE_CASE = state_dict['''decoder.embed_tokens.weight''']
_SCREAMING_SNAKE_CASE = MBartForConditionalGeneration(UpperCamelCase__ )
model.model.load_state_dict(UpperCamelCase__ )
if finetuned:
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowerCamelCase : List[Any] = parser.parse_args()
lowerCamelCase : Any = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
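# Example invocation (paths are placeholders):
# python convert_mbart_original_checkpoint_to_pytorch.py /path/to/model.pt ./mbart-dump \
#     --hf_config facebook/mbart-large-cc25 --finetuned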
| 168
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
lowerCamelCase : Optional[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
lowerCamelCase : List[Any] = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class __snake_case( __A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , **A_ , ):
'''simple docstring'''
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , **A_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = merges_file
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 1
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 3
self.add_from_file(A_ )
_SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
with open(A_ , encoding='''utf-8''' ) as merges_handle:
_SCREAMING_SNAKE_CASE = merges_handle.read().split('''\n''' )[:-1]
_SCREAMING_SNAKE_CASE = [tuple(merge.split()[:-1] ) for merge in merges]
_SCREAMING_SNAKE_CASE = dict(zip(A_ , range(len(A_ ) ) ) )
_SCREAMING_SNAKE_CASE = {}
def A ( self , A_ , A_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A ( self , A_ , A_ = None , A_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def A ( self , A_ , A_ = None ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A ( self ):
'''simple docstring'''
return len(self.encoder )
def A ( self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked bigram first
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = '''@@ '''.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        split_tokens = []
        words = re.findall(r'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
def A ( self , A_ ):
'''simple docstring'''
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def A ( self , A_ ):
'''simple docstring'''
return self.decoder.get(A_ , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
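# Toy walk-through of the merge loop in `bpe` above (the ranks are hypothetical):
# with self.bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}, bpe("low") rewrites
# ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>",) and returns "low",
# since the trailing "</w>" marker is stripped before caching.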
| 168
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
snake_case = TypeVar("""T""")
class A_ ( Generic[T] ):
"""simple docstring"""
    def __init__( self : Optional[int] , directed : bool = True ) -> None:
        self.adj_list = {} # dictionary of lists
        self.directed = directed
    def add_edge( self : Dict , source_vertex : T , destination_vertex : T ) -> GraphAdjacencyList[T]:
        if not self.directed: # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else: # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self : Tuple ) -> str:
return pformat(self.adj_list )
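# Minimal usage sketch for the adjacency-list graph above:
if __name__ == "__main__":
    graph = A_[int]()                   # undirected by default
    graph.add_edge(1, 2).add_edge(2, 3) # add_edge returns self, so calls chain
    print(graph)                        # {1: [2], 2: [1, 3], 3: [2]}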
| 67
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67
| 1
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=str , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=str , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=str , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=str , default='''data/dump''' , help='''The dump file prefix.''' )
    args = parser.parse_args()
    logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
        sep = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
        sep = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
    logger.info(F"Loading text from {args.file_path}" )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        data = fp.readlines()
    logger.info('''Start encoding''' )
    logger.info(F"{len(data )} examples to process." )
    rslt = []
    iter = 0
    interval = 1_00_00
    start = time.time()
    for text in data:
        text = F"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info('''Finished binarization''' )
    logger.info(F"{len(data )} examples processed." )
    dp_file = F"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"Dump to {dp_file}" )
    with open(dp_file , '''wb''' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
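# Illustrative sketch (added): reading the dump back. The file name follows the
# script's own `{dump_file}.{tokenizer_name}.pickle` convention; the exact path
# here is hypothetical.
#
#     import pickle
#     with open("data/dump.bert-base-uncased.pickle", "rb") as f:
#         sequences = pickle.load(f)
#     print(len(sequences), sequences[0][:10])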
| 411
|
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
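# Quick illustrative cross-check (added): compares the segmented sieve against a
# naive primality test on a small range.
def _is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(math.sqrt(k)) + 1))


assert sieve(100) == [p for p in range(2, 101) if _is_prime(p)]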
| 411
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
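# Illustrative usage sketch (assumes the upstream package layout; the checkpoint
# name is the canonical published one, downloaded on first use):
#
#     from transformers import DeiTImageProcessor, DeiTModel
#     processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")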
| 33
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo, and returns its contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
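# Illustrative usage (added): per the regex check above, a prompt containing
# whitespace is returned unchanged, so this call needs no network access.
print(download_prompt("Human: <<task>>", agent_name="demo-agent"))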
| 173
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
    os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, 'wb') as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
        f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
    )
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []

    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]

    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f'  Num examples = {len(eval_dataset)}')
    logger.info(f'  Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None

    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))

    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'Evaluation metrics: {eval_metric}')
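# Illustrative invocation (added): the flags match the argparse definitions above;
# the script filename and the paths are placeholders.
#
#     python evaluate_qa_trt.py --onnx_model_path model.onnx --output_dir ./out \
#         --tokenizer_name bert-base-uncased --dataset_name squad --fp16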
| 363
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder"
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
"""simple docstring"""
    config_class = RegNetConfig
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
"""simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
"""simple docstring"""
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None, output_hidden_states: bool = None, return_dict: bool = None, training: bool = False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
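# Illustrative usage sketch (added): "facebook/regnet-y-040" is the checkpoint
# named in the docstring constants above; TF weights are fetched on first use.
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")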
| 363
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
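# Illustrative usage sketch (added; assumes the upstream package layout; the
# en-ro checkpoint is a real published model, downloaded on first use):
#
#     from transformers import MBartForConditionalGeneration, MBartTokenizer
#     tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro")
#     model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")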
| 84
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Circular convolution of two one-dimensional signals, computed via a
    rotated-signal matrix.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
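    # Illustrative run (added): with the hard-coded signals above, the
    # rotated-matrix product works out to [10.0, 10.0, 6.0, 14.0].
    print(CircularConvolution().circular_convolution())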
| 639
| 0
|
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 581
|
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 581
| 1
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def A_ ( snake_case__ , snake_case__="shi-labs/oneformer_demo" ) -> Optional[Any]:
with open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) as f:
_UpperCamelCase :Tuple = json.load(snake_case__ )
_UpperCamelCase :List[str] = {}
_UpperCamelCase :str = []
_UpperCamelCase :Optional[int] = []
for key, info in class_info.items():
_UpperCamelCase :Dict = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(snake_case__ ) )
_UpperCamelCase :str = thing_ids
_UpperCamelCase :Tuple = class_names
return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
"""simple docstring"""
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
A = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="np" ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :List[str] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCamelCase :str = self.image_processing_tester.num_labels
_UpperCamelCase :Optional[Any] = None
_UpperCamelCase :Union[str, Any] = None
_UpperCamelCase :Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
if with_segmentation_maps:
_UpperCamelCase :int = num_labels
if is_instance_map:
_UpperCamelCase :List[Any] = list(range(SCREAMING_SNAKE_CASE__ ) ) * 2
_UpperCamelCase :Optional[int] = dict(enumerate(SCREAMING_SNAKE_CASE__ ) )
_UpperCamelCase :List[str] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCamelCase :Optional[int] = [Image.fromarray(SCREAMING_SNAKE_CASE__ ) for annotation in annotations]
_UpperCamelCase :Union[str, Any] = image_processor(
SCREAMING_SNAKE_CASE__ , ['''semantic'''] * len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , instance_id_to_semantic_id=SCREAMING_SNAKE_CASE__ , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ , )
return inputs
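# A sketch of what `inputs` is expected to contain here (inferred from how the
# keys are consumed in the tests below, not from the documented processor API):
#   inputs["pixel_values"]  - float tensor of shape (batch, channels, H, W)
#   inputs["mask_labels"]   - one padded (num_instances, H, W) mask tensor per image
#   inputs["class_labels"]  - one 1-D tensor of semantic ids per image
#   inputs["text_inputs"]   - `num_text` task/text entries per image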
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
pass
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
def common(SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=None ):
_UpperCamelCase :Union[str, Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=SCREAMING_SNAKE_CASE__ , is_instance_map=SCREAMING_SNAKE_CASE__ , segmentation_type=SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Union[str, Any] = inputs['''mask_labels''']
_UpperCamelCase :List[str] = inputs['''class_labels''']
_UpperCamelCase :Optional[int] = inputs['''pixel_values''']
_UpperCamelCase :Optional[int] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=SCREAMING_SNAKE_CASE__ )
common(is_instance_map=SCREAMING_SNAKE_CASE__ , segmentation_type='''pil''' )
common(is_instance_map=SCREAMING_SNAKE_CASE__ , segmentation_type='''pil''' )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :int = np.zeros((20, 50) )
_UpperCamelCase :List[str] = 1
_UpperCamelCase :str = 1
_UpperCamelCase :Tuple = 1
_UpperCamelCase :List[str] = binary_mask_to_rle(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
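# binary_mask_to_rle flattens the mask and returns alternating run lengths,
# beginning with the count of leading zeros; the assertions above check that
# the first two runs are 21 zeros followed by 45 ones. A minimal sketch of the
# assumed convention:
#   binary_mask_to_rle(np.array([[0, 0, 1, 1, 1]]))  ->  [2, 3]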
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Any = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_UpperCamelCase :List[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCamelCase :Tuple = image_processor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE__ )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_UpperCamelCase :Optional[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_UpperCamelCase :Dict = image_processor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE__ , target_sizes=SCREAMING_SNAKE_CASE__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_UpperCamelCase :Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCamelCase :Dict = image_processor.post_process_instance_segmentation(SCREAMING_SNAKE_CASE__ , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
_UpperCamelCase :int = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCamelCase :Any = image_processor.post_process_panoptic_segmentation(SCREAMING_SNAKE_CASE__ , threshold=0 )
self.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
UpperCamelCase__ :List[Any] = logging.get_logger(__name__)
UpperCamelCase__ :Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
UpperCamelCase__ :List[str] = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
UpperCamelCase__ :List[Any] = {
"""RUCAIBox/mvp""": 1_024,
}
class A( lowerCamelCase__ ):
"""simple docstring"""
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ["input_ids", "attention_mask"]
A = MvpTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> List[str]:
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
_UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_UpperCamelCase :int = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) )
_UpperCamelCase :Optional[int] = add_prefix_space
_UpperCamelCase :List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
_UpperCamelCase :Optional[int] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCamelCase :Any = '''post_processor'''
_UpperCamelCase :Optional[Any] = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
_UpperCamelCase :Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_UpperCamelCase :Optional[Any] = tuple(state['''sep'''] )
if "cls" in state:
_UpperCamelCase :Optional[Any] = tuple(state['''cls'''] )
_UpperCamelCase :str = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_UpperCamelCase :Optional[int] = add_prefix_space
_UpperCamelCase :Union[str, Any] = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
_UpperCamelCase :Union[str, Any] = trim_offsets
_UpperCamelCase :Optional[Any] = True
if changes_to_apply:
_UpperCamelCase :Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) )
_UpperCamelCase :Optional[Any] = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
_UpperCamelCase :List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
_UpperCamelCase :int = value
def _UpperCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> BatchEncoding:
"""simple docstring"""
_UpperCamelCase :int = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> BatchEncoding:
"""simple docstring"""
_UpperCamelCase :Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
_UpperCamelCase :Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> str:
"""simple docstring"""
_UpperCamelCase :Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
_UpperCamelCase :Any = [self.sep_token_id]
_UpperCamelCase :List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
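# Like BART/RoBERTa, MVP does not use token type ids, so both branches above
# return all zeros. The lengths being measured follow the assumed RoBERTa-style
# special-token layout:
#   single sequence: <s> A </s>             -> (len(A) + 2) zeros
#   sequence pair:   <s> A </s></s> B </s>  -> (len(A) + len(B) + 4) zeros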
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_lowercase : Dict = logging.getLogger(__name__)
_lowercase : Optional[int] = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
A = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=UpperCamelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=UpperCamelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=UpperCamelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=UpperCamelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=UpperCamelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=UpperCamelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=UpperCamelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=UpperCamelCase__ , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=UpperCamelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=UpperCamelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=UpperCamelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=UpperCamelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=UpperCamelCase__ , default=5_12 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=UpperCamelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=UpperCamelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
A = parser.parse_args()
return args
def _lowerCAmelCase ( UpperCamelCase__: Dict ) -> str:
"""simple docstring"""
try:
if args.tpu_name:
A = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
A = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(UpperCamelCase__ )
tf.tpu.experimental.initialize_tpu_system(UpperCamelCase__ )
return tpu
def _lowerCAmelCase ( UpperCamelCase__: List[str] ) -> str:
"""simple docstring"""
A = 0
for file in file_list:
A = file.split("""/""" )[-1]
A = re.search(r"""-\d+-(\d+)\.tfrecord""" , UpperCamelCase__ ).group(1 )
A = int(UpperCamelCase__ )
num_samples += sample_count
return num_samples
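# count_samples assumes each shard's filename encodes its sample count, e.g. a
# shard named "wikitext-00001-01024.tfrecord" (an illustrative name, not one
# this script guarantees) matches the r"-\d+-(\d+)\.tfrecord" pattern above and
# contributes 1024 samples.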
def _lowerCAmelCase ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Dict , UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: int=None ) -> Tuple:
"""simple docstring"""
A = count_samples(UpperCamelCase__ )
A = tf.data.Dataset.from_tensor_slices(UpperCamelCase__ )
if shuffle:
A = dataset.shuffle(len(UpperCamelCase__ ) )
A = tf.data.TFRecordDataset(UpperCamelCase__ , num_parallel_reads=UpperCamelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
A = dataset.apply(tf.data.experimental.assert_cardinality(UpperCamelCase__ ) )
A = dataset.map(UpperCamelCase__ , num_parallel_calls=UpperCamelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
A = dataset.shuffle(args.shuffle_buffer_size )
A = dataset.batch(UpperCamelCase__ , drop_remainder=UpperCamelCase__ )
A = dataset.map(UpperCamelCase__ , num_parallel_calls=UpperCamelCase__ )
A = dataset.prefetch(UpperCamelCase__ )
return dataset
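# The pipeline order above is deliberate: shard files are shuffled before being
# read, cardinality is asserted (TFRecord counts cannot be inferred lazily),
# records are decoded, samples are re-shuffled in a bounded buffer, batched
# with drop_remainder (TPUs need static shapes), masked per batch rather than
# per sample, and prefetched so the accelerator never waits on input.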
def _lowerCAmelCase ( UpperCamelCase__: Any ) -> Optional[int]:
"""simple docstring"""
if not args.no_tpu:
A = initialize_tpu(UpperCamelCase__ )
A = tf.distribute.TPUStrategy(UpperCamelCase__ )
else:
A = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
A = AutoTokenizer.from_pretrained(args.tokenizer )
A = AutoConfig.from_pretrained(args.pretrained_model_config )
A = tokenizer.vocab_size
A = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
A = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
A = count_samples(UpperCamelCase__ )
A = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
A = steps_per_epoch * args.num_epochs
with strategy.scope():
A = TFAutoModelForMaskedLM.from_config(UpperCamelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
A , A = create_optimizer(
num_train_steps=UpperCamelCase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=UpperCamelCase__ , metrics=["""accuracy"""] )
def decode_fn(UpperCamelCase__: Optional[Any] ):
A = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(UpperCamelCase__ , UpperCamelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
A = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm_probability=args.mlm_probability , mlm=UpperCamelCase__ , return_tensors="""tf""" )
def mask_with_collator(UpperCamelCase__: List[str] ):
# TF really needs an isin() function
A = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
A , A = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(UpperCamelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=UpperCamelCase__ , )
return batch
A = args.per_replica_batch_size * strategy.num_replicas_in_sync
A = prepare_dataset(
UpperCamelCase__ , decode_fn=UpperCamelCase__ , mask_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
A = prepare_dataset(
UpperCamelCase__ , decode_fn=UpperCamelCase__ , mask_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )
A = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=UpperCamelCase__ ) )
model.fit(
UpperCamelCase__ , validation_data=UpperCamelCase__ , epochs=args.num_epochs , callbacks=UpperCamelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_lowercase : str = parse_args()
main(args)
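# Example invocation (script name and paths are illustrative only):
#   python train_mlm_tpu.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16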
def _lowerCAmelCase ( UpperCamelCase__: int ) -> bool:
"""simple docstring"""
return str(UpperCamelCase__ ) == str(UpperCamelCase__ )[::-1]
def _lowerCAmelCase ( UpperCamelCase__: int ) -> int:
"""simple docstring"""
return int(UpperCamelCase__ ) + int(str(UpperCamelCase__ )[::-1] )
def _lowerCAmelCase ( UpperCamelCase__: int = 1_00_00 ) -> int:
"""simple docstring"""
A = []
for num in range(1 , UpperCamelCase__ ):
A = 0
A = num
while iterations < 50:
A = sum_reverse(UpperCamelCase__ )
iterations += 1
if is_palindrome(UpperCamelCase__ ):
break
else:
lychrel_nums.append(UpperCamelCase__ )
return len(UpperCamelCase__ )
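# Project Euler 55: a number is provisionally Lychrel if 50 reverse-and-add
# iterations never yield a palindrome. Worked example (not part of the code):
#   47 -> 47 + 74 = 121, a palindrome after one step, so 47 is not Lychrel;
#   196 never produces a palindrome within 50 steps and is counted.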
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def a_ ( __UpperCAmelCase ) -> int:
"""simple docstring"""
snake_case: Optional[int] =VideoMAEConfig()
set_architecture_configs(__UpperCAmelCase , __UpperCAmelCase )
if "finetuned" not in model_name:
snake_case: str =False
if "finetuned" in model_name:
snake_case: Any ='huggingface/label-files'
if "kinetics" in model_name:
snake_case: Tuple =4_00
snake_case: Union[str, Any] ='kinetics400-id2label.json'
elif "ssv2" in model_name:
snake_case: Tuple =1_74
snake_case: Dict ='something-something-v2-id2label.json'
else:
raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' )
snake_case: Optional[int] =json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
snake_case: Dict ={int(__UpperCAmelCase ): v for k, v in idalabel.items()}
snake_case: Dict =idalabel
snake_case: List[str] ={v: k for k, v in idalabel.items()}
return config
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
if "small" in model_name:
snake_case: List[Any] =3_84
snake_case: Union[str, Any] =15_36
snake_case: List[Any] =12
snake_case: List[str] =16
snake_case: Dict =12
snake_case: Dict =3
snake_case: Dict =1_92
snake_case: Union[str, Any] =7_68
elif "large" in model_name:
snake_case: Union[str, Any] =10_24
snake_case: str =40_96
snake_case: List[str] =24
snake_case: Tuple =16
snake_case: str =12
snake_case: Tuple =8
snake_case: Optional[Any] =5_12
snake_case: Any =20_48
elif "huge" in model_name:
snake_case: Optional[int] =12_80
snake_case: Optional[int] =51_20
snake_case: Union[str, Any] =32
snake_case: int =16
snake_case: Tuple =12
snake_case: Optional[int] =8
snake_case: Tuple =6_40
snake_case: Optional[int] =25_60
elif "base" not in model_name:
raise ValueError('Model name should include either \"small\", \"base\", \"large\", or \"huge\"' )
def a_ ( __UpperCAmelCase ) -> Dict:
"""simple docstring"""
if "encoder." in name:
snake_case: List[str] =name.replace('encoder.' , '' )
if "cls_token" in name:
snake_case: int =name.replace('cls_token' , 'videomae.embeddings.cls_token' )
if "decoder_pos_embed" in name:
snake_case: str =name.replace('decoder_pos_embed' , 'decoder.decoder_pos_embed' )
if "pos_embed" in name and "decoder" not in name:
snake_case: str =name.replace('pos_embed' , 'videomae.embeddings.position_embeddings' )
if "patch_embed.proj" in name:
snake_case: Optional[int] =name.replace('patch_embed.proj' , 'videomae.embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
snake_case: Dict =name.replace('patch_embed.norm' , 'videomae.embeddings.norm' )
if "decoder.blocks" in name:
snake_case: int =name.replace('decoder.blocks' , 'decoder.decoder_layers' )
if "blocks" in name:
snake_case: List[Any] =name.replace('blocks' , 'videomae.encoder.layer' )
if "attn.proj" in name:
snake_case: Dict =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "bias" not in name:
snake_case: Optional[int] =name.replace('attn' , 'attention.self' )
if "attn" in name:
snake_case: Any =name.replace('attn' , 'attention.attention' )
if "norm1" in name:
snake_case: Optional[Any] =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
snake_case: List[str] =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
snake_case: int =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
snake_case: Union[str, Any] =name.replace('mlp.fc2' , 'output.dense' )
if "decoder_embed" in name:
snake_case: Union[str, Any] =name.replace('decoder_embed' , 'decoder.decoder_embed' )
if "decoder_norm" in name:
snake_case: List[str] =name.replace('decoder_norm' , 'decoder.decoder_norm' )
if "decoder_pred" in name:
snake_case: str =name.replace('decoder_pred' , 'decoder.decoder_pred' )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case: Any =name.replace('norm.weight' , 'videomae.layernorm.weight' )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case: Optional[int] =name.replace('norm.bias' , 'videomae.layernorm.bias' )
if "head" in name and "decoder" not in name:
snake_case: int =name.replace('head' , 'classifier' )
return name
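# Illustrative rename produced by the rules above (derived by tracing them in
# order, not taken from the original script's documentation):
#   "blocks.0.attn.proj.weight"
#     -> "videomae.encoder.layer.0.attention.output.dense.weight"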
def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
snake_case: List[Any] =orig_state_dict.pop(__UpperCAmelCase )
if key.startswith('encoder.' ):
snake_case: Tuple =key.replace('encoder.' , '' )
if "qkv" in key:
snake_case: List[str] =key.split('.' )
if key.startswith('decoder.blocks' ):
snake_case: Union[str, Any] =config.decoder_hidden_size
snake_case: Union[str, Any] =int(key_split[2] )
snake_case: Dict ='decoder.decoder_layers.'
if "weight" in key:
snake_case: int =val[:dim, :]
snake_case: Any =val[dim : dim * 2, :]
snake_case: Dict =val[-dim:, :]
else:
snake_case: Union[str, Any] =config.hidden_size
snake_case: List[str] =int(key_split[1] )
snake_case: Union[str, Any] ='videomae.encoder.layer.'
if "weight" in key:
snake_case: Tuple =val[:dim, :]
snake_case: Union[str, Any] =val[dim : dim * 2, :]
snake_case: Tuple =val[-dim:, :]
else:
snake_case: Dict =val
return orig_state_dict
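# The fused "qkv" projection from the original checkpoint is split into three
# equal chunks along the first axis: val[:dim] is the query, val[dim : dim * 2]
# the key, and val[-dim:] the value, with dim equal to the (decoder) hidden size.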
def a_ ( ) -> int:
"""simple docstring"""
snake_case: List[Any] =hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
snake_case: Optional[int] =np.load(__UpperCAmelCase )
return list(__UpperCAmelCase )
def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
"""simple docstring"""
snake_case: int =get_videomae_config(__UpperCAmelCase )
if "finetuned" in model_name:
snake_case: List[str] =VideoMAEForVideoClassification(__UpperCAmelCase )
else:
snake_case: Dict =VideoMAEForPreTraining(__UpperCAmelCase )
# download original checkpoint, hosted on Google Drive
snake_case: int ='pytorch_model.bin'
gdown.cached_download(__UpperCAmelCase , __UpperCAmelCase , quiet=__UpperCAmelCase )
snake_case: str =torch.load(__UpperCAmelCase , map_location='cpu' )
if "model" in files:
snake_case: Optional[Any] =files['model']
else:
snake_case: Tuple =files['module']
snake_case: List[str] =convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
# verify model on basic input
snake_case: Tuple =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case: Dict =prepare_video()
snake_case: Optional[int] =image_processor(__UpperCAmelCase , return_tensors='pt' )
if "finetuned" not in model_name:
snake_case: List[str] =hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
snake_case: Optional[int] =torch.load(__UpperCAmelCase )
snake_case: List[Any] =model(**__UpperCAmelCase )
snake_case: Optional[int] =outputs.logits
snake_case: Optional[int] =[
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case: Tuple =torch.Size([1, 4_00] )
snake_case: Optional[Any] =torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case: Union[str, Any] =torch.Size([1, 1_74] )
snake_case: Optional[Any] =torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case: Any =torch.Size([1, 14_08, 15_36] )
snake_case: Any =torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case: int =torch.Size([1, 14_08, 15_36] )
snake_case: Dict =torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case: int =torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case: Optional[Any] =torch.Size([1, 14_08, 15_36] )
snake_case: int =torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case: List[Any] =torch.Size([1, 4_00] )
snake_case: Any =torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case: str =torch.Size([1, 4_00] )
snake_case: Dict =torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case: Tuple =torch.Size([1, 4_00] )
snake_case: Union[str, Any] =torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case: str =torch.Size([1, 4_00] )
snake_case: List[str] =torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case: List[str] =torch.Size([1, 14_08, 15_36] )
snake_case: Dict =torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case: Optional[int] =torch.Size([1, 1_74] )
snake_case: List[str] =torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case: Dict =torch.Size([1, 14_08, 15_36] )
snake_case: Dict =torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case: List[str] =torch.Size([1, 1_74] )
snake_case: Optional[Any] =torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 )
else:
print('Logits:' , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1e-4 )
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case: Optional[Any] =outputs.loss
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-4 )
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
model.push_to_hub(__UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
from __future__ import annotations
import numpy as np
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = np.shape(lowerCAmelCase )
if rows != columns:
_lowerCAmelCase = (
"""'table' has to be of square shaped array but got a """
f"{rows}x{columns} array:\n{table}"
)
raise ValueError(lowerCAmelCase )
_lowerCAmelCase = np.zeros((rows, columns) )
_lowerCAmelCase = np.zeros((rows, columns) )
for i in range(lowerCAmelCase ):
for j in range(lowerCAmelCase ):
_lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(lowerCAmelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
_lowerCAmelCase = (table[i][j] - total) / upper[j][j]
_lowerCAmelCase = 1
for j in range(lowerCAmelCase , lowerCAmelCase ):
_lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(lowerCAmelCase ) )
_lowerCAmelCase = table[i][j] - total
return lower, upper
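# Doolittle-style LU decomposition: `lower` has a unit diagonal, and
# lower @ upper reconstructs the input. A small hand-checkable sketch:
#   [[2, 1],    [[1, 0],     [[2, 1],
#    [4, 5]] =   [2, 1]]  @   [0, 3]]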
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _snake_case ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline
SCREAMING_SNAKE_CASE = TEXT_TO_AUDIO_PARAMS
SCREAMING_SNAKE_CASE = TEXT_TO_AUDIO_BATCH_PARAMS
SCREAMING_SNAKE_CASE = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=snake_case_ , )
__UpperCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
__UpperCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase : int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
__UpperCAmelCase : Tuple = ClapTextModelWithProjection(snake_case_ )
__UpperCAmelCase : Union[str, Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
__UpperCAmelCase : int = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case_ , )
__UpperCAmelCase : Tuple = SpeechTaHifiGan(snake_case_ )
__UpperCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(snake_case_ ).startswith("mps" ):
__UpperCAmelCase : Optional[Any] = torch.manual_seed(snake_case_ )
else:
__UpperCAmelCase : List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__UpperCAmelCase : Union[str, Any] = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Optional[int] = self.get_dummy_components()
__UpperCAmelCase : Optional[int] = AudioLDMPipeline(**snake_case_ )
__UpperCAmelCase : Any = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : List[str] = self.get_dummy_inputs(snake_case_ )
__UpperCAmelCase : List[Any] = audioldm_pipe(**snake_case_ )
__UpperCAmelCase : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(snake_case_ ) == 256
__UpperCAmelCase : Union[str, Any] = audio[:10]
__UpperCAmelCase : int = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : int = self.get_dummy_components()
__UpperCAmelCase : Any = AudioLDMPipeline(**snake_case_ )
__UpperCAmelCase : Any = audioldm_pipe.to(snake_case_ )
__UpperCAmelCase : List[str] = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(snake_case_ )
__UpperCAmelCase : str = 3 * [inputs["prompt"]]
# forward
__UpperCAmelCase : Dict = audioldm_pipe(**snake_case_ )
__UpperCAmelCase : List[str] = output.audios[0]
__UpperCAmelCase : Optional[int] = self.get_dummy_inputs(snake_case_ )
__UpperCAmelCase : str = 3 * [inputs.pop("prompt" )]
__UpperCAmelCase : Optional[Any] = audioldm_pipe.tokenizer(
snake_case_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
__UpperCAmelCase : Dict = text_inputs["input_ids"].to(snake_case_ )
__UpperCAmelCase : Dict = audioldm_pipe.text_encoder(
snake_case_ , )
__UpperCAmelCase : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : Optional[int] = F.normalize(snake_case_ , dim=-1 )
__UpperCAmelCase : List[Any] = prompt_embeds
# forward
__UpperCAmelCase : Tuple = audioldm_pipe(**snake_case_ )
__UpperCAmelCase : Optional[int] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : Any = self.get_dummy_components()
__UpperCAmelCase : str = AudioLDMPipeline(**snake_case_ )
__UpperCAmelCase : str = audioldm_pipe.to(snake_case_ )
__UpperCAmelCase : List[Any] = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : str = self.get_dummy_inputs(snake_case_ )
__UpperCAmelCase : Any = 3 * ["this is a negative prompt"]
__UpperCAmelCase : int = negative_prompt
__UpperCAmelCase : Any = 3 * [inputs["prompt"]]
# forward
__UpperCAmelCase : int = audioldm_pipe(**snake_case_ )
__UpperCAmelCase : List[str] = output.audios[0]
__UpperCAmelCase : str = self.get_dummy_inputs(snake_case_ )
__UpperCAmelCase : str = 3 * [inputs.pop("prompt" )]
__UpperCAmelCase : Optional[int] = []
for p in [prompt, negative_prompt]:
__UpperCAmelCase : str = audioldm_pipe.tokenizer(
snake_case_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
__UpperCAmelCase : Tuple = text_inputs["input_ids"].to(snake_case_ )
__UpperCAmelCase : Union[str, Any] = audioldm_pipe.text_encoder(
snake_case_ , )
__UpperCAmelCase : List[str] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : int = F.normalize(snake_case_ , dim=-1 )
embeds.append(snake_case_ )
__UpperCAmelCase : List[Any] = embeds
# forward
__UpperCAmelCase : str = audioldm_pipe(**snake_case_ )
__UpperCAmelCase : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : List[Any] = PNDMScheduler(skip_prk_steps=snake_case_ )
__UpperCAmelCase : int = AudioLDMPipeline(**snake_case_ )
__UpperCAmelCase : int = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : int = self.get_dummy_inputs(snake_case_ )
__UpperCAmelCase : Tuple = "egg cracking"
__UpperCAmelCase : Tuple = audioldm_pipe(**snake_case_ , negative_prompt=snake_case_ )
__UpperCAmelCase : str = output.audios[0]
assert audio.ndim == 1
assert len(snake_case_ ) == 256
__UpperCAmelCase : Dict = audio[:10]
__UpperCAmelCase : Dict = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Any = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=snake_case_ )
__UpperCAmelCase : int = AudioLDMPipeline(**snake_case_ )
__UpperCAmelCase : Dict = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : List[str] = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
__UpperCAmelCase : Optional[Any] = audioldm_pipe(snake_case_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__UpperCAmelCase : Any = 2
__UpperCAmelCase : Union[str, Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : Optional[int] = audioldm_pipe(snake_case_ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__UpperCAmelCase : str = 2
__UpperCAmelCase : Optional[Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
__UpperCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = AudioLDMPipeline(**snake_case_ )
__UpperCAmelCase : Dict = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : Any = audioldm_pipe.vocoder.config.sampling_rate
__UpperCAmelCase : Optional[int] = self.get_dummy_inputs(snake_case_ )
__UpperCAmelCase : List[Any] = audioldm_pipe(audio_length_in_s=0.016 , **snake_case_ )
__UpperCAmelCase : str = output.audios[0]
assert audio.ndim == 1
assert len(snake_case_ ) / vocoder_sampling_rate == 0.016
__UpperCAmelCase : Optional[int] = audioldm_pipe(audio_length_in_s=0.032 , **snake_case_ )
__UpperCAmelCase : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(snake_case_ ) / vocoder_sampling_rate == 0.032
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = AudioLDMPipeline(**snake_case_ )
__UpperCAmelCase : Optional[int] = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : Any = ["hey"]
__UpperCAmelCase : Any = audioldm_pipe(snake_case_ , num_inference_steps=1 )
__UpperCAmelCase : Any = output.audios.shape
assert audio_shape == (1, 256)
__UpperCAmelCase : Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__UpperCAmelCase : List[str] = SpeechTaHifiGan(snake_case_ ).to(snake_case_ )
__UpperCAmelCase : List[Any] = audioldm_pipe(snake_case_ , num_inference_steps=1 )
__UpperCAmelCase : List[Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case_ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ )
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]="cpu" , UpperCAmelCase_ : Dict=torch.floataa , UpperCAmelCase_ : List[Any]=0 ):
"""simple docstring"""
__UpperCAmelCase : int = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__UpperCAmelCase : int = np.random.RandomState(snake_case_ ).standard_normal((1, 8, 128, 16) )
__UpperCAmelCase : str = torch.from_numpy(snake_case_ ).to(device=snake_case_ , dtype=snake_case_ )
__UpperCAmelCase : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
__UpperCAmelCase : Any = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : Any = self.get_inputs(snake_case_ )
__UpperCAmelCase : Tuple = 25
__UpperCAmelCase : Any = audioldm_pipe(**snake_case_ ).audios[0]
assert audio.ndim == 1
assert len(snake_case_ ) == 81_920
__UpperCAmelCase : Tuple = audio[77_230:77_240]
__UpperCAmelCase : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
__UpperCAmelCase : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : str = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
__UpperCAmelCase : Dict = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__UpperCAmelCase : Tuple = audioldm_pipe.to(snake_case_ )
audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
__UpperCAmelCase : Any = self.get_inputs(snake_case_ )
__UpperCAmelCase : str = audioldm_pipe(**snake_case_ ).audios[0]
assert audio.ndim == 1
assert len(snake_case_ ) == 81_920
__UpperCAmelCase : Any = audio[27_780:27_790]
__UpperCAmelCase : Dict = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
__UpperCAmelCase : List[str] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
lowerCAmelCase__ : str = "path-to-your-trained-model"
lowerCAmelCase__ : Any = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
lowerCAmelCase__ : Tuple = "A photo of sks dog in a bucket"
lowerCAmelCase__ : Any = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = None
class UpperCamelCase_ ( snake_case_ , snake_case_ ):
'''simple docstring'''
lowerCAmelCase = 2
@register_to_config
def __init__( self , a = 0.02 , a = 1_00 , a = 1.007 , a = 80 , a = 0.05 , a = 50 , ) -> List[Any]:
# standard deviation of the initial noise distribution
snake_case_ = sigma_max
# setable values
snake_case_ = None
snake_case_ = None
snake_case_ = None # sigma(t_i)
def _UpperCamelCase ( self , a , a = None ) -> torch.FloatTensor:
return sample
def _UpperCamelCase ( self , a , a = None ) -> Optional[int]:
snake_case_ = num_inference_steps
snake_case_ = np.arange(0 , self.num_inference_steps )[::-1].copy()
snake_case_ = torch.from_numpy(a ).to(a )
snake_case_ = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
snake_case_ = torch.tensor(a , dtype=torch.float32 , device=a )
def _UpperCamelCase ( self , a , a , a = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
snake_case_ = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
snake_case_ = 0
# sample eps ~ N(0, S_noise^2 * I)
snake_case_ = self.config.s_noise * randn_tensor(sample.shape , generator=a ).to(sample.device )
snake_case_ = sigma + gamma * sigma
snake_case_ = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _UpperCamelCase ( self , a , a , a , a , a = True , ) -> Union[KarrasVeOutput, Tuple]:
snake_case_ = sample_hat + sigma_hat * model_output
snake_case_ = (sample_hat - pred_original_sample) / sigma_hat
snake_case_ = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=a , derivative=a , pred_original_sample=a )
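# The method above is the first-order (Euler) predictor from Karras et al.
# (2022); the corrector below applies a Heun-style second-order update by
# averaging the two derivatives (0.5 * derivative + 0.5 * derivative_corr).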
def _UpperCamelCase ( self , a , a , a , a , a , a , a = True , ) -> Union[KarrasVeOutput, Tuple]:
snake_case_ = sample_prev + sigma_prev * model_output
snake_case_ = (sample_prev - pred_original_sample) / sigma_prev
snake_case_ = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=a , derivative=a , pred_original_sample=a )
def _UpperCamelCase ( self , a , a , a ) -> Dict:
raise NotImplementedError()
import unittest
import numpy as np
def __UpperCAmelCase ( a_ , a_ , a_ , a_ = None , ):
snake_case_ = np.shape(a_)
snake_case_ = np.shape(a_)
snake_case_ = np.shape(a_)
if shape_a[0] != shape_b[0]:
snake_case_ = (
'Expected the same number of rows for A and B. '
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(a_)
if shape_b[1] != shape_c[1]:
snake_case_ = (
'Expected the same number of columns for B and C. '
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(a_)
snake_case_ = pseudo_inv
if a_inv is None:
try:
snake_case_ = np.linalg.inv(a_)
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.')
return mat_c - mat_b.T @ a_inv @ mat_b
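# Schur complement of the block matrix [[A, B], [B.T, C]] with respect to A:
#   S = C - B.T @ A^{-1} @ B
# The first test below relies on the determinant identity
#   det([[A, B], [B.T, C]]) == det(A) * det(S).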
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> None:
snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] )
snake_case_ = np.array([[2, 1], [6, 3]] )
snake_case_ = schur_complement(a , a , a )
snake_case_ = np.block([[a, b], [b.T, c]] )
snake_case_ = np.linalg.det(a )
snake_case_ = np.linalg.det(a )
snake_case_ = np.linalg.det(a )
self.assertAlmostEqual(a , det_a * det_s )
def _UpperCamelCase ( self ) -> None:
snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] )
snake_case_ = np.array([[2, 1], [6, 3]] )
with self.assertRaises(a ):
schur_complement(a , a , a )
def _UpperCamelCase ( self ) -> None:
snake_case_ = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
snake_case_ = np.array([[0, 3], [3, 0], [2, 3]] )
snake_case_ = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(a ):
schur_complement(a , a , a )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
# Dummy objects for environments without the SentencePiece backend: touching
# any class below raises an ImportError with install instructions.
# NOTE: the concrete tokenizer class names are not recoverable from this
# snippet, so the placeholder name `a_` is kept for each of the 31 stubs.
from ..utils import DummyObject, requires_backends


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class a_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
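
# Behavior sketch (illustrative): with the `DummyObject` metaclass, touching
# any of these stubs without sentencepiece installed raises an ImportError
# along the lines of:
#
#     tok = a_()
#     # ImportError: a_ requires the SentencePiece library but it was not
#     # found in your environment. Check out the installation instructions ...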
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
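
        # Illustration of the caveat above (the pieces shown are hypothetical):
        # for an out-of-vocabulary character the slow tokenizer's `tokenize` can
        # return the raw piece (e.g. "é") where the fast one returns "<unk>",
        # so the stable comparison is the round trip through ids on both sides.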
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgToImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        # Stand-in for a real feature extractor: returns an object whose
        # `pixel_values` is an empty tensor, which is enough for these tests.
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that stable diffusion img2img works with fp16."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgToImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
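
# Minimal usage sketch (assumes the BAAI/AltDiffusion checkpoint and a CUDA
# device are available; it mirrors the integration test above):
#
#     pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion")
#     pipe = pipe.to("cuda")
#     out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75)
#     out.images[0].save("fantasy_landscape.png")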
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
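
# For example, _set("key_a", "val_a") expands to (setitem, "key_a", "val_a"),
# so _run_operation(d, setitem, "key_a", "val_a") performs d["key_a"] = "val_a"
# and returns (None, None); a failing _get returns (None, KeyError(...)),
# letting the test below compare failures as well as successes.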
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
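
# Note: `>` on sets is the strict-superset test, so this asserts that HashMap
# adds no public name beyond what a plain dict exposes (and exposes fewer).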
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans one modality of the model-doc table of content: deduplicate entries and sort them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
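
# Toy example (illustrative): duplicate `local` keys with a single title are
# collapsed, then everything is sorted case-insensitively by title:
#
#     clean_model_doc_toc([
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#     ])
#     # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#     #     {"local": "model_doc/bert", "title": "BERT"}]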
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    # Extract the modality subsections and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
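
# Typical invocations (assuming this script lives at utils/check_doc_toc.py,
# as in the transformers repo):
#     python utils/check_doc_toc.py                      # check only
#     python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the YAML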
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working.
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
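
# On import, `deprecate` emits a FutureWarning roughly of the form:
#     Importing `DiffusionPipeline` or `ImagePipelineOutput` from
#     diffusers.pipeline_utils is deprecated ... (scheduled for removal in 0.22.0)
# while the re-export above keeps old import paths working in the meantime.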