code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCamelCase ( __UpperCAmelCase ):
'''simple docstring'''
__magic_name__ = 'ClapFeatureExtractor'
__magic_name__ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , lowerCamelCase__ , lowerCamelCase__ ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ ):
UpperCAmelCase__: Optional[int] = kwargs.pop("sampling_rate" , lowerCamelCase__ )
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none." )
if text is not None:
UpperCAmelCase__: List[Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if audios is not None:
UpperCAmelCase__: Optional[int] = self.feature_extractor(
lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None and audios is not None:
UpperCAmelCase__: int = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )
def _UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCAmelCase ( self , *lowerCamelCase__ , **lowerCamelCase__ ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[Any] = self.tokenizer.model_input_names
UpperCAmelCase__: Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) ) | 113 |
'''simple docstring'''
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase ):
A_ : List[str] = name
A_ : Dict = value
A_ : Optional[int] = weight
def __repr__(self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _a (self ):
return self.value
def _a (self ):
return self.name
def _a (self ):
return self.weight
def _a (self ):
return self.value / self.weight
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = []
for i in range(len(lowerCamelCase__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = sorted(lowerCamelCase__ , key=lowerCamelCase__ , reverse=lowerCamelCase__ )
A_ : Any = []
A_, A_ : Tuple = 0.0, 0.0
for i in range(len(lowerCamelCase__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 0 |
"""simple docstring"""
import math
__A : int = 10
__A : List[Any] = 7
__A : Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def lowercase ( __snake_case : str = 2_0 ):
lowercase_ : Dict = math.comb(lowerCamelCase__ , lowerCamelCase__ )
lowercase_ : Optional[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
lowercase_ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 231 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCamelCase :int = logging.getLogger(__name__)
lowerCamelCase :List[Any] = 5_0 # max width of layer names
lowerCamelCase :List[Any] = 7_0 # max width of quantizer names
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=lowerCamelCase__ , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=lowerCamelCase__ , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=lowerCamelCase__ , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=lowerCamelCase__ , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=lowerCamelCase__ , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=lowerCamelCase__ , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if args.calibrator == "max":
A_ : Union[str, Any] = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
A_ : int = """histogram"""
elif args.calibrator == "mse":
A_ : Dict = """histogram"""
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
A_ : int = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase__ )
A_ : Optional[Any] = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase__ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
'''simple docstring'''
logger.info("""Configuring Model for Quantization""" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase__ , ["""embeddings"""] , which="""weight""" , _disabled=lowerCamelCase__ )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase__ , [""""""] , _disabled=lowerCamelCase__ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase__ , args.quant_disable_keyword , _disabled=lowerCamelCase__ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=lowerCamelCase__ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase__ , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=lowerCamelCase__ )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase__ )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase__ , lowerCamelCase__ )
if args.clip_gelu:
clip_gelu(lowerCamelCase__ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'{name:80}: {module}' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def fusea(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase__ , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
A_ : List[Any] = qq._amax.detach().item()
A_ : Optional[int] = qk._amax.detach().item()
A_ : Dict = qv._amax.detach().item()
A_ : Any = max(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
qq._amax.fill_(lowerCamelCase__ )
qk._amax.fill_(lowerCamelCase__ )
qv._amax.fill_(lowerCamelCase__ )
logger.info(f' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(f'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
A_ : Optional[int] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase__ )
A_ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
A_ : Tuple = mod.weight.shape[0]
A_ : Dict = mod._weight_quantizer._amax.detach()
A_ : List[Any] = torch.ones(lowerCamelCase__ , dtype=amax.dtype , device=amax.device ) * amax
print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A_ : Dict = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A_ : Tuple = set(range(len(mod.weight.size() ) ) ) - axis_set
A_ : int = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase__ , keepdims=lowerCamelCase__ ).detach()
logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A_ : str = amax
def a ( lowerCamelCase__ , lowerCamelCase__=25 , lowerCamelCase__=1_80 , lowerCamelCase__=None ):
'''simple docstring'''
if ignore is None:
A_ : int = []
elif not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = [ignore]
A_ : Optional[Any] = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
A_ : List[str] = max(lowerCamelCase__ , len(lowerCamelCase__ ) )
for name, mod in model.named_modules():
A_ : Tuple = getattr(lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ )
A_ : List[Any] = getattr(lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ )
if not hasattr(lowerCamelCase__ , """weight""" ):
continue
if type(lowerCamelCase__ ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase__ ) is str and s in name]:
continue
A_ : Optional[int] = f'Act:{input_q.extra_repr()}'
A_ : Dict = f'Wgt:{weight_q.extra_repr()}'
A_ : List[Any] = f'{name:{name_width}} {act_str} {wgt_str}'
if len(lowerCamelCase__ ) <= line_width:
logger.info(lowerCamelCase__ )
else:
logger.info(f'{name:{name_width}} {act_str}' )
logger.info(f'{" ":{name_width}} {wgt_str}' )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase__ , pytorch_quantization.nn.TensorQuantizer ):
print(f'{name:80} {mod}' )
count += 1
print(f'{count} TensorQuantizers found in model' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase__ , lowerCamelCase__ )
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
logger.warning(f'{name} has no {quantizer}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="both" , **lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = f'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
if which in ["input", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_input_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase__ , lowerCamelCase__ , """_weight_quantizer""" , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(lowerCamelCase__ , """_input_quantizer""" ) or hasattr(lowerCamelCase__ , """_weight_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
set_quantizers(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Dict = f'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += f' {k}={v}'
setattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
logger.info(lowerCamelCase__ ) | 667 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _A = "cpu" , _A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = device
__SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained(_A )
__SCREAMING_SNAKE_CASE = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
__SCREAMING_SNAKE_CASE = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
__SCREAMING_SNAKE_CASE = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__SCREAMING_SNAKE_CASE = torchvision.transforms.Resize(224 )
__SCREAMING_SNAKE_CASE = torchvision.transforms.CenterCrop(224 )
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.resize(_A )
__SCREAMING_SNAKE_CASE = self.center_crop(_A )
__SCREAMING_SNAKE_CASE = self.normalize(_A )
return images
def __call__( self , _A=None , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(text=_A , **_A )
__SCREAMING_SNAKE_CASE = self.preprocess_img(_A )
__SCREAMING_SNAKE_CASE = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A=10 , _A=0.0_1 , _A=None , _A=None , _A=None , _A=None , _A=None , _A=None , _A=False , _A=True , _A="image" , _A=True , _A=False , _A=False , _A=False , ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = device if device else get_device()
if vqgan:
__SCREAMING_SNAKE_CASE = vqgan
else:
__SCREAMING_SNAKE_CASE = load_vqgan(self.device , conf_path=_A , ckpt_path=_A )
self.vqgan.eval()
if clip:
__SCREAMING_SNAKE_CASE = clip
else:
__SCREAMING_SNAKE_CASE = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
__SCREAMING_SNAKE_CASE = ProcessorGradientFlow(device=self.device )
__SCREAMING_SNAKE_CASE = iterations
__SCREAMING_SNAKE_CASE = lr
__SCREAMING_SNAKE_CASE = log
__SCREAMING_SNAKE_CASE = make_grid
__SCREAMING_SNAKE_CASE = return_val
__SCREAMING_SNAKE_CASE = quantize
__SCREAMING_SNAKE_CASE = self.vqgan.decoder.z_shape
def _A ( self , _A=None , _A=None , _A=5 , _A=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
if output_path is None:
__SCREAMING_SNAKE_CASE = """./animation.gif"""
if input_path is None:
__SCREAMING_SNAKE_CASE = self.save_path
__SCREAMING_SNAKE_CASE = sorted(glob(input_path + '/*' ) )
if not len(_A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(_A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
__SCREAMING_SNAKE_CASE = total_duration / len(_A )
__SCREAMING_SNAKE_CASE = [frame_duration] * len(_A )
if extend_frames:
__SCREAMING_SNAKE_CASE = 1.5
__SCREAMING_SNAKE_CASE = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(_A ) )
imageio.mimsave(_A , _A , duration=_A )
print(f"""gif saved to {output_path}""" )
def _A ( self , _A=None , _A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
__SCREAMING_SNAKE_CASE = preprocess(Image.open(_A ) , target_image_size=256 ).to(self.device )
__SCREAMING_SNAKE_CASE = preprocess_vqgan(_A )
__SCREAMING_SNAKE_CASE = self.vqgan.encode(_A )
return z
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.latent.detach().requires_grad_()
__SCREAMING_SNAKE_CASE = base_latent + transform_vector
if self.quantize:
__SCREAMING_SNAKE_CASE = self.vqgan.quantize(_A )
else:
__SCREAMING_SNAKE_CASE = trans_latent
return self.vqgan.decode(_A )
def _A ( self , _A , _A , _A=None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.clip_preprocessor(text=_A , images=_A , return_tensors='pt' , padding=_A )
__SCREAMING_SNAKE_CASE = self.clip(**_A )
__SCREAMING_SNAKE_CASE = clip_outputs.logits_per_image
if weights is not None:
__SCREAMING_SNAKE_CASE = similarity_logits * weights
return similarity_logits.sum()
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self._get_clip_similarity(pos_prompts['prompts'] , _A , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
__SCREAMING_SNAKE_CASE = self._get_clip_similarity(neg_prompts['prompts'] , _A , weights=neg_prompts['weights'] )
else:
__SCREAMING_SNAKE_CASE = torch.tensor([1] , device=self.device )
__SCREAMING_SNAKE_CASE = -torch.log(_A ) + torch.log(_A )
return loss
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.randn_like(self.latent , requires_grad=_A , device=self.device )
__SCREAMING_SNAKE_CASE = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__SCREAMING_SNAKE_CASE = self._add_vector(_A )
__SCREAMING_SNAKE_CASE = loop_post_process(_A )
__SCREAMING_SNAKE_CASE = self._get_CLIP_loss(_A , _A , _A )
print('CLIP loss' , _A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=_A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _A ( self , _A , _A , _A ):
'''simple docstring'''
wandb.init(reinit=_A , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
__SCREAMING_SNAKE_CASE = Image.open(_A )
__SCREAMING_SNAKE_CASE = image.resize((256, 256) )
wandb.log('Original Image' , wandb.Image(_A ) )
def _A ( self , _A ):
'''simple docstring'''
if not prompts:
return []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(_A , (tuple, list) ):
__SCREAMING_SNAKE_CASE = prompt[0]
__SCREAMING_SNAKE_CASE = float(prompt[1] )
elif ":" in prompt:
__SCREAMING_SNAKE_CASE = prompt.split(':' )
__SCREAMING_SNAKE_CASE = float(_A )
else:
__SCREAMING_SNAKE_CASE = prompt
__SCREAMING_SNAKE_CASE = 1.0
processed_prompts.append(_A )
weights.append(_A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(_A , device=self.device ),
}
def _A ( self , _A , _A=None , _A=None , _A=True , _A=False , _A=True , _A=True , _A=None , ):
'''simple docstring'''
if image_path:
__SCREAMING_SNAKE_CASE = self._get_latent(_A )
else:
__SCREAMING_SNAKE_CASE = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(_A , _A , _A )
assert pos_prompts, "You must provide at least one positive prompt."
__SCREAMING_SNAKE_CASE = self.process_prompts(_A )
__SCREAMING_SNAKE_CASE = self.process_prompts(_A )
if save_final and save_path is None:
__SCREAMING_SNAKE_CASE = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
__SCREAMING_SNAKE_CASE = save_path + """_""" + get_timestamp()
os.makedirs(_A )
__SCREAMING_SNAKE_CASE = save_path
__SCREAMING_SNAKE_CASE = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(_A ) )
__SCREAMING_SNAKE_CASE = loop_post_process(_A )
for iter, transformed_img in enumerate(self._optimize_CLIP(_A , _A , _A ) ):
if show_intermediate:
show_pil(_A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'Image': wandb.Image(_A )} )
if show_final:
show_pil(_A )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 148 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : List[Any] = 0
@slow
def _a (self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowercase ) , 0 )
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _a (self ):
A_ : int = AutoConfig.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
# Check that tokenizer_type ≠ model_type
A_ : int = AutoTokenizer.from_pretrained(lowercase , config=lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" , use_fast=lowercase )
self.assertIsInstance(lowercase , lowercase )
@require_tokenizers
def _a (self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(lowercase , """vocab.txt""" ) )
A_ : Any = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""bert""" )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(lowercase , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(lowercase , """merges.txt""" ) )
A_ : int = AutoTokenizer.from_pretrained(lowercase , tokenizer_type="""gpt2""" )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
with pytest.raises(lowercase ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ : str = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase , lowercase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase )
else:
self.assertEqual(tokenizer.do_lower_case , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _a (self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ : int = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def _a (self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
A_ : List[str] = TOKENIZER_MAPPING.values()
A_ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase )
@require_tokenizers
def _a (self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase ) , lowercase )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , lowercase )
@require_tokenizers
def _a (self ):
A_ : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=lowercase )
A_ : List[Any] = """Hello, world. How are you?"""
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ : Dict = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=lowercase )
A_ : List[Any] = tokenizer.tokenize(lowercase )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def _a (self ):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(lowercase ) , lowercase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def _a (self ):
A_ : Any = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Tuple = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _a (self ):
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
# Check we can load the tokenizer config of an online model.
A_ : Tuple = get_tokenizer_config("""bert-base-cased""" )
A_ : Any = config.pop("""_commit_hash""" , lowercase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ : List[Any] = get_tokenizer_config(lowercase )
self.assertDictEqual(lowercase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ : int = AutoTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : Dict = get_tokenizer_config(lowercase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def _a (self ):
try:
AutoConfig.register("""custom""" , lowercase )
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
A_ : Tuple = CustomTokenizer.from_pretrained(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase )
A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def _a (self ):
        """Registering slow and fast tokenizer classes, both in two steps and in one.

        NOTE(review): `bert_tokenizer`, `tokenizer`, and `lowercase` are unbound
        (mechanical rename) -- confirm against the upstream test.
        """
        try:
            AutoConfig.register("""custom""" , lowercase )
            # Can register in two steps
            AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                lowercase , slow_tokenizer_class=lowercase , fast_tokenizer_class=lowercase )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase ):
                AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                A_ : str = BertTokenizerFast.from_pretrained(lowercase )
                bert_tokenizer.save_pretrained(lowercase )
                A_ : Optional[Any] = CustomTokenizerFast.from_pretrained(lowercase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowercase )
                A_ : List[str] = AutoTokenizer.from_pretrained(lowercase )
                self.assertIsInstance(lowercase , lowercase )
                A_ : List[Any] = AutoTokenizer.from_pretrained(lowercase , use_fast=lowercase )
                self.assertIsInstance(lowercase , lowercase )
        finally:
            # Clean up the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def _a (self ):
        """Loading a dynamic (remote-code) tokenizer: requires trust_remote_code and
        round-trips through save_pretrained, for both fast and slow variants.

        NOTE(review): `tokenizer`, `reloaded_tokenizer`, and `lowercase` are unbound
        (mechanical rename) -- confirm against the upstream test.
        """
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase ):
            A_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase ):
            A_ : Any = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
        A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowercase )
            A_ : int = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            A_ : str = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(lowercase )
                A_ : Any = AutoTokenizer.from_pretrained(lowercase , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
    @require_tokenizers
    def _a (self ):
        """Local custom tokenizers take precedence unless trust_remote_code=True.

        NOTE(review): the second nested class below shadows the first (upstream
        they were distinct slow/fast classes); `tokenizer` and `lowercase` are
        unbound (mechanical rename) -- confirm against the upstream test.
        """
        class _lowerCAmelCase ( __UpperCAmelCase ):
            __SCREAMING_SNAKE_CASE : Dict = False
        class _lowerCAmelCase ( __UpperCAmelCase ):
            __SCREAMING_SNAKE_CASE : str = NewTokenizer
            __SCREAMING_SNAKE_CASE : Optional[Any] = False
        try:
            AutoConfig.register("""custom""" , lowercase )
            AutoTokenizer.register(lowercase , slow_tokenizer_class=lowercase )
            AutoTokenizer.register(lowercase , fast_tokenizer_class=lowercase )
            # If remote code is not set, the default is to use local
            A_ : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            A_ : int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            A_ : int = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            A_ : List[Any] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            A_ : Any = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertTrue(tokenizer.special_attribute_present )
            A_ : Union[str, Any] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            # Clean up the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def _a (self ):
        """Legacy dynamic-tokenizer repos still load with trust_remote_code, fast and slow.

        NOTE(review): `tokenizer` and `lowercase` are unbound (mechanical rename) -- confirm.
        """
        A_ : Dict = AutoTokenizer.from_pretrained(
            """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase )
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            A_ : Optional[int] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=lowercase , use_fast=lowercase )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
    def _a (self ):
        """A nonexistent repo id raises with a helpful "not a valid model identifier" message."""
        with self.assertRaisesRegex(
            lowercase , """bert-base is not a local folder and is not a valid model identifier""" ):
            A_ : List[str] = AutoTokenizer.from_pretrained("""bert-base""" )
    def _a (self ):
        """An invalid `revision` raises with a "not a valid git identifier" message."""
        with self.assertRaisesRegex(
            lowercase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            A_ : Tuple = AutoTokenizer.from_pretrained(lowercase , revision="""aaaaaa""" )
    def _a (self ):
        """Reloading a cached tokenizer makes exactly one HEAD request and no GETs."""
        # Make sure we have cached the tokenizer.
        A_ : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        with RequestCounter() as counter:
            A_ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase :
    """A vertex in a weighted, undirected graph, as used by Prim's MST algorithms below.

    Attributes:
        id: string form of the vertex identifier.
        key: current cheapest edge weight connecting the vertex to the growing MST.
        pi: predecessor vertex in the MST (None until discovered).
        neighbors: list of adjacent vertex objects.
        edges: mapping of neighbor id -> edge weight.
    """

    def __init__(self , id_ ):
        # Fix: the original assigned throwaway locals instead of instance attributes.
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex_id: edge weight}

    def __lt__(self , other ):
        # Fix: the original body read an undefined name `other`; bind it as the parameter.
        # Ordering by key lets min()/heapq select the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self ):
        return self.id

    def add_neighbor(self , vertex ):
        """Record `vertex` as adjacent to this one."""
        # Fix: both mutators were named `_SCREAMING_SNAKE_CASE` (the second shadowed
        # the first); call sites in this module use .add_neighbor / .add_edge.
        self.neighbors.append(vertex )

    def add_edge(self , vertex , weight ):
        """Set the weight of the edge from this vertex to `vertex`."""
        self.edges[vertex.id] = weight
def UpperCamelCase ( graph , a , b , weight ):
    """Insert an undirected weighted edge between 1-indexed vertices `a` and `b`.

    Fix: the original signature declared four parameters all named
    `__lowerCamelCase` (a SyntaxError) and the body read undefined names;
    the usage inside the body (`graph`, `a`, `b`, and the edge weight) makes
    the intended parameters unambiguous.
    """
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , weight )
    graph[b - 1].add_edge(graph[a - 1] , weight )
def UpperCamelCase ( graph , root ):
    """Prim's minimum-spanning-tree via linear scan of the frontier.

    Returns a list of (child_id, parent_id) pairs, both 1-based, for every
    vertex except the root. Fix: the original's locals were collapsed by a
    mechanical rename, leaving `u`, `q`, and the accumulator unbound.
    """
    a = []
    # Initialise all keys to infinity and clear predecessors.
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # Extract the cheapest frontier vertex (relies on the vertex __lt__).
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            # Relax: adopt u as parent if the edge u-v is cheaper than v's key.
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def UpperCamelCase ( graph , root ):
    """Prim's minimum-spanning-tree using a binary heap for the frontier.

    Yields (child_id, parent_id) pairs, both 1-based, for every vertex except
    the root. Fix: the original's locals were collapsed by a mechanical
    rename, leaving `u`, `h`, and the popped vertex unbound.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # Re-heapify: decreasing v.key invalidates the heap invariant.
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCamelCase ( ):
    """No-op placeholder.

    NOTE(review): this re-definition shadows the heap-based Prim generator
    defined immediately above (three module functions share one name) -- confirm.
    """
    pass
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
def a ( number_of_bytes , partitions ):
    """Divide `number_of_bytes` into `partitions` contiguous 1-indexed byte ranges.

    Returns a list of "start-end" strings; the final partition absorbs any
    remainder so the last range always ends at `number_of_bytes`.

    Raises:
        ValueError: if `partitions` is not positive or exceeds `number_of_bytes`.

    Fix: the original declared two parameters with the same name (SyntaxError)
    and its locals were collapsed by a rename, leaving `allocation_list`,
    `start_bytes`, and `end_bytes` unbound; the body's own reads of
    `number_of_bytes` / `partitions` pin the intended parameter names.
    """
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}' )
    return allocation_list
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
# Module-level logger, keyed to this module's name.
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _SCREAMING_SNAKE_CASE( __UpperCAmelCase ):
    """Chunked automatic-mask-generation pipeline (SAM-style).

    `_UpperCamelCase` (preprocess) crops the image, embeds it once, and yields
    batches of prompt points; `_UpperCamelCase` (forward) scores masks per
    batch; `_UpperCamelCase` (postprocess) filters and NMS-merges the results.
    PyTorch-only; requires the vision and torch backends.

    NOTE(review): a mechanical rename collapsed distinct locals into the single
    name `__SCREAMING_SNAKE_CASE` (e.g. the three kwargs dicts in the sanitize
    method) and collapsed distinct parameters into `SCREAMING_SNAKE_CASE__`
    (duplicate parameter names in `__call__` and the keyword-reading `kwargs`),
    so many names read later (`kwargs`, `grid_points`, `input_labels`,
    `crop_boxes`, `model_inputs`, `image_embeddings`, `n_points`,
    `points_per_batch`, `masks`, `iou_scores`, `original_sizes`, `input_boxes`,
    `boxes`, `all_scores`, `all_masks`, `all_boxes`, `rle_mask`,
    `bounding_boxes`, `output_masks`, `extra`, `optional`) are unbound --
    reconcile with the upstream implementation before relying on this code.
    Three methods also share the name `_UpperCamelCase`, so later defs shadow
    earlier ones at class-attribute level.
    """
    def __init__( self ,**SCREAMING_SNAKE_CASE__ ) -> List[Any]:
        """Validate required backends and restrict the pipeline to PyTorch."""
        super().__init__(**SCREAMING_SNAKE_CASE__ )
        requires_backends(self ,'''vision''' )
        requires_backends(self ,'''torch''' )
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        self.check_model_type(SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> Tuple:
        """Split **kwargs into preprocess / forward / postprocess parameter dicts."""
        __SCREAMING_SNAKE_CASE :str = {}
        __SCREAMING_SNAKE_CASE :Dict = {}
        __SCREAMING_SNAKE_CASE :str = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            __SCREAMING_SNAKE_CASE :Dict = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            __SCREAMING_SNAKE_CASE :int = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            __SCREAMING_SNAKE_CASE :str = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            __SCREAMING_SNAKE_CASE :int = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            __SCREAMING_SNAKE_CASE :Tuple = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            __SCREAMING_SNAKE_CASE :Any = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            __SCREAMING_SNAKE_CASE :Optional[int] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            __SCREAMING_SNAKE_CASE :Union[str, Any] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            __SCREAMING_SNAKE_CASE :List[str] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            __SCREAMING_SNAKE_CASE :Union[str, Any] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            __SCREAMING_SNAKE_CASE :List[Any] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            __SCREAMING_SNAKE_CASE :Union[str, Any] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self ,SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=None ,**SCREAMING_SNAKE_CASE__ ) -> int:
        """Generate masks for an image (or batch); delegates to the chunk-pipeline base."""
        return super().__call__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,num_workers=SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=64 ,SCREAMING_SNAKE_CASE__ = 0 ,SCREAMING_SNAKE_CASE__ = 5_12 / 15_00 ,SCREAMING_SNAKE_CASE__ = 32 ,SCREAMING_SNAKE_CASE__ = 1 ,) -> int:
        """Preprocess: load the image, compute crop boxes and the image embedding
        once, then yield dicts of point batches for the forward pass."""
        __SCREAMING_SNAKE_CASE :Tuple = load_image(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :int = self.image_processor.size["""longest_edge"""]
        __SCREAMING_SNAKE_CASE :str = self.image_processor.generate_crop_boxes(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Dict = self.image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='''pt''' )
        with self.device_placement():
            if self.framework == "pt":
                __SCREAMING_SNAKE_CASE :Optional[Any] = self.get_inference_context()
                with inference_context():
                    # Embed the image once; every point batch reuses this embedding.
                    __SCREAMING_SNAKE_CASE :str = self._ensure_tensor_on_device(SCREAMING_SNAKE_CASE__ ,device=self.device )
                    __SCREAMING_SNAKE_CASE :Tuple = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
                    __SCREAMING_SNAKE_CASE :Tuple = image_embeddings
        __SCREAMING_SNAKE_CASE :Dict = grid_points.shape[1]
        __SCREAMING_SNAKE_CASE :Optional[Any] = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''' )
        for i in range(0 ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
            # Slice out one batch of prompt points and matching labels.
            __SCREAMING_SNAKE_CASE :Tuple = grid_points[:, i : i + points_per_batch, :, :]
            __SCREAMING_SNAKE_CASE :List[Any] = input_labels[:, i : i + points_per_batch]
            __SCREAMING_SNAKE_CASE :Optional[Any] = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=0.8_8 ,SCREAMING_SNAKE_CASE__=0.9_5 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=1 ,) -> Optional[int]:
        """Forward: run the model on one point batch and filter the raw masks
        on-device to avoid copying every mask back to the CPU."""
        __SCREAMING_SNAKE_CASE :Any = model_inputs.pop('''input_boxes''' )
        __SCREAMING_SNAKE_CASE :str = model_inputs.pop('''is_last''' )
        __SCREAMING_SNAKE_CASE :int = model_inputs.pop('''original_sizes''' ).tolist()
        __SCREAMING_SNAKE_CASE :int = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
        __SCREAMING_SNAKE_CASE :List[str] = self.model(**SCREAMING_SNAKE_CASE__ )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        __SCREAMING_SNAKE_CASE :Optional[int] = model_outputs["""pred_masks"""]
        __SCREAMING_SNAKE_CASE :Tuple = self.image_processor.post_process_masks(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,binarize=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Union[str, Any] = model_outputs["""iou_scores"""]
        __SCREAMING_SNAKE_CASE :Tuple = self.image_processor.filter_masks(
            masks[0] ,iou_scores[0] ,original_sizes[0] ,input_boxes[0] ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,)
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=0.7 ,) -> List[str]:
        """Postprocess: concatenate all batch outputs, NMS-merge the masks, and
        assemble the final {masks, scores, ...} dict (optionally with RLE masks
        and bounding boxes)."""
        __SCREAMING_SNAKE_CASE :Tuple = []
        __SCREAMING_SNAKE_CASE :Optional[Any] = []
        __SCREAMING_SNAKE_CASE :str = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores''' ) )
            all_masks.extend(model_output.pop('''masks''' ) )
            all_boxes.append(model_output.pop('''boxes''' ) )
        __SCREAMING_SNAKE_CASE :Any = torch.cat(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :List[Any] = torch.cat(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Optional[int] = self.image_processor.post_process_for_mask_generation(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :int = defaultdict(SCREAMING_SNAKE_CASE__ )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Optional[int] = {}
        if output_rle_mask:
            __SCREAMING_SNAKE_CASE :List[str] = rle_mask
        if output_bboxes_mask:
            __SCREAMING_SNAKE_CASE :Optional[int] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for conversion progress messages.
lowerCamelCase :Any = logging.get_logger(__name__)
def a ( encoder_config , decoder_config ):
    """Build the (old_key, new_key) rename list mapping original TrOCR/DeiT
    encoder weight names onto the HuggingFace ViT encoder layout.

    Fix: the original bound the list to a throwaway local (`A_`) while the
    body appended to an unbound name `rename_keys`; the body's own reads of
    `encoder_config.num_hidden_layers` pin the first parameter. The second
    parameter is kept for signature compatibility with the call site.
    """
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append(
            (f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
            ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
            ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
            ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
            ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
            ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
        ] )
    return rename_keys
def a ( state_dict , encoder_config ):
    """Split each layer's fused qkv projection weight into separate
    query/key/value matrices under the HuggingFace ViT key names.

    Fix: the original popped the fused weight into a throwaway local and then
    assigned the three slices to the same throwaway name instead of writing
    them back into `state_dict`. The target key names
    (`...attention.attention.{query,key,value}.weight`) follow the HuggingFace
    ViT layout -- confirm against the upstream conversion script.
    """
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def a ( dct , old , new ):
    """Move the value stored under `old` to the key `new` in dict `dct`.

    Fix: the original popped the value into a throwaway local and then
    re-assigned the same local instead of writing `dct[new]`, so the rename
    silently dropped the entry.
    """
    val = dct.pop(old )
    dct[new] = val
def a ( checkpoint_url ):
    """Download a demo image matching the checkpoint flavor.

    Handwritten checkpoints get an IAM handwriting sample; printed/stage1
    checkpoints get a SROIE receipt. Fix: the original's locals were collapsed
    by a rename, leaving `url` and `im` unbound and dropping the `stream=True`
    argument (presumably True upstream -- confirm). If `checkpoint_url`
    matches neither branch, `url` stays unbound, as in the original.
    """
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"""  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def a ( checkpoint_url , pytorch_dump_folder_path ):
    """Convert an original TrOCR fairseq checkpoint at `checkpoint_url` into a
    HuggingFace VisionEncoderDecoderModel, verify its logits against known
    slices, and save model + processor to `pytorch_dump_folder_path`.

    NOTE(review): a mechanical rename collapsed most locals into `A_` and most
    arguments into `lowerCamelCase__`, so names read later (`encoder_config`,
    `decoder_config`, `state_dict`, `rename_keys`, `model`, `image_processor`,
    `tokenizer`, `processor`, `pixel_values`, `decoder_input_ids`, `outputs`,
    `logits`, `expected_shape`, `expected_slice`) are unbound -- reconcile with
    the upstream conversion script before running.
    """
    A_ : Union[str, Any] = ViTConfig(image_size=3_84 , qkv_bias=lowerCamelCase__ )
    A_ : int = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        A_ : List[str] = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        A_ : Union[str, Any] = 10_24
        A_ : List[Any] = 40_96
        A_ : Dict = 24
        A_ : List[str] = 16
        A_ : Union[str, Any] = 10_24
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        A_ : Optional[Any] = False
        A_ : Union[str, Any] = """relu"""
        A_ : List[str] = 10_24
        A_ : Tuple = True
        A_ : Tuple = False
        A_ : List[str] = False
    # load HuggingFace model
    A_ : Optional[int] = ViTModel(lowerCamelCase__ , add_pooling_layer=lowerCamelCase__ )
    A_ : Dict = TrOCRForCausalLM(lowerCamelCase__ )
    A_ : Dict = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ )
    model.eval()
    # load state_dict of original model, rename some keys
    A_ : int = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , check_hash=lowerCamelCase__ )["""model"""]
    A_ : int = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ )
    for src, dest in rename_keys:
        rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        A_ : Union[str, Any] = state_dict.pop(lowerCamelCase__ )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            A_ : str = val
        else:
            A_ : List[str] = val
    # load state dict
    model.load_state_dict(lowerCamelCase__ )
    # Check outputs on an image
    A_ : str = ViTImageProcessor(size=encoder_config.image_size )
    A_ : Union[str, Any] = RobertaTokenizer.from_pretrained("""roberta-large""" )
    A_ : Tuple = TrOCRProcessor(lowerCamelCase__ , lowerCamelCase__ )
    A_ : Dict = processor(images=prepare_img(lowerCamelCase__ ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    A_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    A_ : Union[str, Any] = model(pixel_values=lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ )
    A_ : Dict = outputs.logits
    A_ : str = torch.Size([1, 1, 5_02_65] )
    if "trocr-base-handwritten" in checkpoint_url:
        A_ : Optional[int] = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
    elif "trocr-large-handwritten" in checkpoint_url:
        A_ : Any = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
    elif "trocr-base-printed" in checkpoint_url:
        A_ : List[Any] = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
    elif "trocr-large-printed" in checkpoint_url:
        A_ : Optional[Any] = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10] , lowerCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
    Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(lowerCamelCase__ )
    print(f'Saving processor to {pytorch_dump_folder_path}' )
    processor.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL and output folder, then convert.
    # NOTE(review): `parser` and `args` are unbound (assignments went to
    # `lowerCamelCase`) -- confirm against the upstream script.
    lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    lowerCamelCase :Optional[int] = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import math
def _UpperCAmelCase ( A ):
'''simple docstring'''
UpperCAmelCase__ =[]
UpperCAmelCase__ =2
UpperCAmelCase__ =int(math.sqrt(lowerCamelCase__ ) ) # Size of every segment
UpperCAmelCase__ =[True] * (end + 1)
UpperCAmelCase__ =[]
while start <= end:
if temp[start] is True:
in_prime.append(lowerCamelCase__ )
for i in range(start * start , end + 1 , lowerCamelCase__ ):
UpperCAmelCase__ =False
start += 1
prime += in_prime
UpperCAmelCase__ =end + 1
UpperCAmelCase__ =min(2 * end , lowerCamelCase__ )
while low <= n:
UpperCAmelCase__ =[True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase__ =math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCamelCase__ , high + 1 , lowerCamelCase__ ):
UpperCAmelCase__ =False
for j in range(len(lowerCamelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase__ =high + 1
UpperCAmelCase__ =min(high + end , lowerCamelCase__ )
return prime
# Demo: print all primes below one million using the segmented sieve above.
# Fix: the sieve function in this file is named `_UpperCAmelCase`; the
# original called an undefined name `sieve`, which raised NameError on import.
print(_UpperCAmelCase(10**6))
| 625 |
'''simple docstring'''
# Self-replicating one-liner: the %r-formatted template string is applied to itself.
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowerCamelCase__ (_UpperCAmelCase):
    """Factory for the `env` info command.

    NOTE(review): `EnvironmentCommand()` is called with no arguments although
    its `__init__` declares a required parameter, and this definition is
    immediately shadowed by the next one -- confirm against upstream.
    """
    return EnvironmentCommand()
def lowerCamelCase__ (_UpperCAmelCase):
    """Factory building an EnvironmentCommand from parsed CLI args.

    NOTE(review): the body reads `args` but the parameter is named
    `_UpperCAmelCase` (lost in a rename) -- confirm.
    """
    return EnvironmentCommand(args.accelerate_config_file)
class _snake_case ( __UpperCAmelCase ):
    """`transformers-cli env` command: collects versions of transformers, torch,
    TensorFlow, Flax/JAX, accelerate, and safetensors plus platform info, and
    prints a copy-pasteable bug-report table.

    NOTE(review): a mechanical rename collapsed distinct locals into the single
    name `SCREAMING_SNAKE_CASE` and parameters into `a`, so names read later
    (`parser`, `download_parser`, `accelerate_config_file`,
    `safetensors_version`, `accelerate_version`, `accelerate_config`,
    `accelerate_config_str`, `pt_version`, `pt_cuda_available`, `tf_version`,
    `tf_cuda_available`, `flax_version`, `jax_version`, `jaxlib_version`,
    `jax_backend`, `d`) are unbound; `__init__` also declares the parameter
    `a` twice -- reconcile with the upstream implementation.
    """
    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( a) -> Dict:
        """Register the `env` subcommand and its CLI options on the given parser."""
        SCREAMING_SNAKE_CASE = parser.add_parser('env')
        download_parser.set_defaults(func=a)
        download_parser.add_argument(
            '--accelerate-config_file' , default=a , help='The accelerate config file to use for the default values in the launching script.' , )
        download_parser.set_defaults(func=a)
    def __init__( self , a , *a) -> Tuple:
        """Store the optional accelerate config file path for later lookup."""
        SCREAMING_SNAKE_CASE = accelerate_config_file
    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        """Collect version/platform info, print the bug-report table, return the info dict."""
        SCREAMING_SNAKE_CASE = """not installed"""
        if is_safetensors_available():
            import safetensors
            SCREAMING_SNAKE_CASE = safetensors.__version__
        elif importlib.util.find_spec('safetensors') is not None:
            import safetensors
            SCREAMING_SNAKE_CASE = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
        SCREAMING_SNAKE_CASE = """not installed"""
        SCREAMING_SNAKE_CASE = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            SCREAMING_SNAKE_CASE = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(a):
                SCREAMING_SNAKE_CASE = load_config_from_file(self._accelerate_config_file).to_dict()
            SCREAMING_SNAKE_CASE = (
                """\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
                if isinstance(a , a)
                else f'''\t{accelerate_config}'''
            )
        SCREAMING_SNAKE_CASE = """not installed"""
        SCREAMING_SNAKE_CASE = """NA"""
        if is_torch_available():
            import torch
            SCREAMING_SNAKE_CASE = torch.__version__
            SCREAMING_SNAKE_CASE = torch.cuda.is_available()
        SCREAMING_SNAKE_CASE = """not installed"""
        SCREAMING_SNAKE_CASE = """NA"""
        if is_tf_available():
            import tensorflow as tf
            SCREAMING_SNAKE_CASE = tf.__version__
            try:
                # deprecated in v2.1
                SCREAMING_SNAKE_CASE = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                SCREAMING_SNAKE_CASE = bool(tf.config.list_physical_devices('GPU'))
        SCREAMING_SNAKE_CASE = """not installed"""
        SCREAMING_SNAKE_CASE = """not installed"""
        SCREAMING_SNAKE_CASE = """not installed"""
        SCREAMING_SNAKE_CASE = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            SCREAMING_SNAKE_CASE = flax.__version__
            SCREAMING_SNAKE_CASE = jax.__version__
            SCREAMING_SNAKE_CASE = jaxlib.__version__
            SCREAMING_SNAKE_CASE = jax.lib.xla_bridge.get_backend().platform
        SCREAMING_SNAKE_CASE = {
            """`transformers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """Huggingface_hub version""": huggingface_hub.__version__,
            """Safetensors version""": f'''{safetensors_version}''',
            """Accelerate version""": f'''{accelerate_version}''',
            """Accelerate config""": f'''{accelerate_config_str}''',
            """PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
            """Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
            """Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
            """Jax version""": f'''{jax_version}''',
            """JaxLib version""": f'''{jaxlib_version}''',
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(a))
        return info
    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( a) -> Any:
        """Format a dict as '- key: value' lines for the printed report."""
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()]) + "\n"
| 73 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared test fixtures: the small Lena test image and its grayscale version.
# NOTE(review): both loads are assigned to `lowerCamelCase` (the second shadows
# the first), and line two reads an unbound name `img` -- the tests below also
# read a different name, `lowerCamelCase__` -- confirm against upstream.
lowerCamelCase :List[str] = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
lowerCamelCase :Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    """convert_to_negative should yield at least one non-zero pixel."""
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    """change_contrast should return a PIL image of the original size."""
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )
def test_gen_gaussian_kernel():
    """A 9x9 Gaussian kernel should contain only non-zero entries."""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    """Canny edge detection on the grayscale Lena image should produce edges."""
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    """Gaussian filtering the grayscale image should yield non-zero output."""
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    """Convolving with a Laplace-like kernel should produce some response."""
    # Laplace diagonals kernel.
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    # np.uint8 is used directly because the file's `from numpy import ... uinta`
    # alias does not exist in numpy.
    res = conv.img_convolve(gray, laplace).astype(np.uint8)
    assert res.any()
def test_median_filter():
    """Median filtering the grayscale image should yield non-zero output."""
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    """Sobel filtering should return non-trivial gradient and angle maps."""
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    """make_sepia should return an image with all pixels set."""
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    """Burkes dithering should produce a non-empty output image."""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    """Nearest-neighbour resize to 400x200 should produce a non-empty image."""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    """Exercise get_neighbors_pixel and local_binary_value on the Lena image."""
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array with the same height and width as the read image.
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# (The search functions below fall back to a linear scan when the bracket is
# narrower than this threshold.)
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan over the half-open index range [left, right).

    Returns the index of `target` in `array`, or -1 when it is absent.
    (The garbled signature had four identical parameter names, which is a
    SyntaxError in Python; names are restored from the body's usage.)
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int, precision: int = 10) -> int:
    """Iterative ternary search over a sorted list; returns the index or -1.

    Fixes over the previous version:
    - midpoints are computed *inside* [left, right]; the old `(left+right)//3+1`
      formula could index past the end of the array (IndexError for, e.g.,
      a 46-element list and a target in the top third),
    - the linear fallback covers the inclusive bracket, so boundary elements
      are no longer missed (the old code returned -1 for target 13 in range(20)).

    `precision` (keep it >= 10) is the bracket width below which the search
    falls back to a linear scan; it is a defaulted parameter so the function
    is self-contained.
    """
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            # Linear fallback over the inclusive bracket [left, right].
            for i in range(left, right + 1):
                if array[i] == target:
                    return i
            return -1
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(
    left: int, right: int, array: list[int], target: int, precision: int = 10
) -> int:
    """Recursive ternary search over the inclusive bracket [left, right].

    Returns the index of `target` in the sorted `array`, or -1.

    Fixes over the previous version:
    - a single-element bracket (left == right) is now searched instead of
      immediately returning -1,
    - the linear fallback includes the right boundary,
    - midpoints stay inside [left, right] (the old formula could index out
      of range).
    """
    if left > right:
        return -1
    if right - left < precision:
        # Linear fallback over the inclusive bracket [left, right].
        for i in range(left, right + 1):
            if array[i] == target:
                return i
        return -1
    one_third = left + (right - left) // 3
    two_third = right - (right - left) // 3
    if array[one_third] == target:
        return one_third
    if array[two_third] == target:
        return two_third
    if target < array[one_third]:
        return rec_ternary_search(left, one_third - 1, array, target, precision)
    if array[two_third] < target:
        return rec_ternary_search(two_third + 1, right, array, target, precision)
    return rec_ternary_search(one_third + 1, two_third - 1, array, target, precision)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read a sorted, comma-separated list and a target, then run both searches.
    # (The garbled version assigned every value to the same name and then read
    # `user_input`/`collection`/`target`/`resulta`, which were never defined.)
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 328 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)  # module-level logger (conventional name restored)
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=None ):
A_ : Optional[int] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , lowercase , getattr(lowercase , lowercase ) )
A_ : List[Any] = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module
class patch_submodule:
    """
    Patch a submodule attribute of an object, keeping all other submodules
    intact at every level (e.g. patch `obj`'s view of "os.path.join" without
    touching the rest of `os` / `os.path`). Usable as a context manager or via
    `start()` / `stop()`.
    """

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target  # dotted path, e.g. "os.path.join"
        self.new = new  # replacement value
        self.key = target.split(".")[0]
        self.original = {}  # attr name -> original value, for restoration on exit
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        # Restore every attribute recorded in __enter__.
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Combines a LayoutLMv3 image processor (which can run OCR to get words and
    boxes) and a LayoutLMv3 tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is the deprecated alias for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (and its OCR, if enabled) then the tokenizer,
        returning the tokenizer output with `pixel_values` added."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 7 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure for the Wav2Vec2 model family. The string keys below
# already use the real module names ("configuration_wav2vec2", ...), so the
# registrations and TYPE_CHECKING imports are restored to match them.
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

# PyTorch classes are only registered when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Fixed: the Flax classes must come from the Flax module, not the TF one.
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules so attribute access triggers lazy imports;
    # merely assigning the _LazyModule to a variable (as before) had no effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)  # module-level logger (conventional name restored)
class LevitImageProcessor(BaseImageProcessor):
    r"""
    Image processor for LeViT-style models: optionally resizes with the LeViT
    256/224 shortest-edge rule, center-crops, rescales and normalizes images.
    (The 256/224 scaling in `resize` identifies this as the LeViT processor.)
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; a "shortest_edge" size is scaled by 256/224 while
        keeping the aspect ratio."""
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more images, falling back to the instance defaults
        for any argument left as None."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 227 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    r"""
    Class-conditional image generation with a Diffusion Transformer (DiT):
    a Transformer denoiser + VAE decoder + scheduler.

    Parameters:
        transformer: class-conditioned denoising Transformer.
        vae: autoencoder used to decode latents into images.
        scheduler: scheduler used together with `transformer` to denoise.
        id2label: optional mapping of ImageNet class id -> comma-separated names.
    """

    def __init__(
        self,
        transformer: TransformeraDModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
        self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable label name(s) to the class ids the model expects."""
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate one image per entry of `class_labels` with classifier-free guidance."""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        # With guidance, duplicate the latents so cond/uncond run in one batch.
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)  # 1000 = null (unconditional) class
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax all edges out of `v` for one direction of the bidirectional search.

    Updates `cst_fwd`, `parent` and `queue` in place, and returns the (possibly
    improved) best known source->destination distance. The name is restored:
    the search below calls `pass_and_relaxation`.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the other search already settled `nxt`, the two frontiers meet here.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra: returns the shortest distance, or -1 if unreachable.

    `graph_forward` holds outgoing edges; `graph_backward` the reversed edges.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Pop the closest unsettled node from each direction. (The garbled
        # version kept the whole (distance, node) tuple instead of unpacking.)
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard stopping rule: the frontiers' settled distances exceed the best meet.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Example graphs for the bidirectional search: forward (outgoing) edges and
# the corresponding reversed (incoming) edges. The garbled version bound both
# dicts to the same name `a`, so the second silently clobbered the first.
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
if __name__ == "__main__":
    # Self-check: run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 640 |
'''simple docstring'''
import math
# Urn contents for Project Euler 493: 7 colours, 10 balls of each colour.
# (The function below reads these names, so the constants are restored.)
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours when drawing `num_picked` balls.

    Uses linearity of expectation: each colour is present unless all picked
    balls avoid it, so E = NUM_COLOURS * (1 - C(60, k) / C(70, k)).
    Returns the result formatted to 9 decimal places.
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
    # Print the answer for the default draw of 20 balls.
    print(solution(20))
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)  # module-level logger (conventional name restored)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCamelCase(Pipeline):
    """Zero-shot image classification pipeline.

    Scores an image against free-form candidate labels with a CLIP-style
    dual-encoder model.  Methods follow the ``Pipeline`` protocol
    (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` /
    ``postprocess``); the previous obfuscated version gave all four the same
    name, so only the last survived.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        # Pick the model mapping that matches the active framework.
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        """Classify ``images`` (path, URL, or PIL image) against ``candidate_labels``."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # All user-facing kwargs are consumed at preprocessing time.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # Turn each label into a full hypothesis sentence before tokenizing.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # Bug fix: the sort key previously declared one parameter name but
        # referenced ``x``, raising NameError at runtime.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: the config classes below call ``logger.warning`` / ``logger.info``
# but no name ``logger`` was ever bound.
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL mapping for the Pix2Struct family.
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}
class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model.

    Renamed from the obfuscated ``_lowerCAmelCase`` so the composite
    ``pix2struct`` config defined later in this file can resolve it by name.
    The base class is the imported ``PretrainedConfig`` (the previous base
    name was undefined).
    """

    model_type = 'pix2struct_text_model'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        # Bug fix: every value below was previously assigned to a throwaway
        # local (``A_``) instead of the instance attribute.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping a composite ``pix2struct`` dict."""
        cls._set_token_in_kwargs(kwargs)

        # Bug fix: both unpack targets were previously the same name, leaving
        # ``config_dict`` undefined.
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model.

    Renamed from the obfuscated ``_lowerCAmelCase`` so the composite
    ``pix2struct`` config defined later in this file can resolve it by name.
    """

    model_type = 'pix2struct_vision_model'

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Bug fix: values were previously bound to throwaway locals instead
        # of instance attributes.
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping a composite ``pix2struct`` dict."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    """Composite configuration tying together the Pix2Struct text and vision
    sub-configs.

    The original obfuscated version referenced ``PixaStructTextConfig`` /
    ``PixaStructVisionConfig`` although no class carried those names, and it
    stored the sub-configs in throwaway locals instead of ``self``.
    """

    model_type = 'pix2struct'
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""")

        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Mirror the decoder's special-token ids at the top level.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Propagate the shared initializer range into both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Bug fix: bound to the conventional name ``logger`` (was a throwaway name).
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL mapping for Data2Vec-Vision.
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/data2vec-vision-base-ft''': (
        '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
    ),
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for the Data2Vec-Vision model.

    The previous version had every ``__init__`` parameter named ``A`` (a
    SyntaxError) and assigned all values to a throwaway local instead of
    ``self``; the real parameter names are recovered from the attribute
    names the body reads.  Base class is the imported ``PretrainedConfig``.
    """

    model_type = 'data2vec-vision'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _UpperCAmelCase(OnnxConfig):
    """ONNX export configuration for Data2Vec-Vision.

    Restores the ``OnnxConfig`` attribute/property names the export
    machinery looks up (they were collapsed to obfuscated identifiers).
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with named dynamic axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 231 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
# Lazy import structure for the Audio Spectrogram Transformer (AST)
# sub-package: heavy submodules are only imported on first attribute access.
# Bug fix: all three pieces were previously assigned to throwaway names while
# the _LazyModule call read the undefined ``_import_structure``.
_import_structure = {
    '''configuration_audio_spectrogram_transformer''': [
        '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''ASTConfig''',
    ]
}

# The modeling module requires torch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ASTForAudioClassification''',
        '''ASTModel''',
        '''ASTPreTrainedModel''',
    ]

# The feature extractor requires the optional speech dependencies.
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ['''ASTFeatureExtractor''']

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # Bug fix: the lazy proxy must replace this module in sys.modules; it was
    # previously assigned to a throwaway name and never installed.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_(ABC):
    '''Abstract base class for CLI sub-commands.

    Bug fixes: the base class was an undefined name (the imported ``ABC`` is
    the intended base, making @abstractmethod effective), and both methods
    shared the obfuscated name ``_A`` so the second silently shadowed the
    first.  Concrete commands attach their parser via ``register_subcommand``
    and implement ``run``.
    '''

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''Attach this command's sub-parser to ``parser``.'''
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        '''Execute the command.'''
        raise NotImplementedError()
| 148 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase(ProcessorMixin):
    """LayoutLMv3 processor: combines an image processor (optionally running
    OCR) with a LayoutLMv3 tokenizer into a single callable.

    The previous version had duplicate ``lowercase`` parameter names
    (a SyntaxError) and lost the ``ProcessorMixin`` class attributes; both
    are restored here.  Base class is the imported ``ProcessorMixin``.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")

        # Accept the deprecated kwarg as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Run the image processor (and its OCR, if enabled), then the
        tokenizer, and merge pixel values into the resulting encoding."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""")

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["""boxes"""],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("""pixel_values""")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["""overflow_to_sample_mapping"""])
        encoded_inputs["""pixel_values"""] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                F' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""",
            FutureWarning,
        )
        return self.image_processor
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

# Registries consulted by the functions below (which read these exact names):
# format type (e.g. "numpy") -> Formatter class,
# alias (e.g. "np") -> canonical format type,
# format type -> error raised when its backend is not installed.
_FORMAT_TYPES: Dict[Optional[str], Type["Formatter"]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register ``formatter_cls`` under ``format_type`` plus any ``aliases``.

    Renamed from the obfuscated ``UpperCamelCase`` to match the call sites
    below; warns when overwriting an existing registration.
    """
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Record the error to raise when ``format_type``'s backend is missing.

    Renamed from the obfuscated ``UpperCamelCase`` to match the call sites
    below.
    """
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")

# Bug fix: each error object below was assigned to a throwaway name while the
# registration call passed the undefined ``_torch_error`` / ``_tf_error`` /
# ``_jax_error``.
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
    _register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
    _register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
    _register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias (e.g. ``"np"``) to its canonical format type
    (e.g. ``"numpy"``); unknown values pass through unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> "Formatter":
    """Instantiate the Formatter registered for ``format_type``.

    Raises the recorded backend-unavailability error for known-but-
    uninstalled types, and ValueError for unknown types.
    """
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'""")
| 204 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _lowerCAmelCase(ChunkPipeline):
    """Automatic mask-generation pipeline (SAM-style).

    Previous version had duplicate ``lowercase`` parameter names (a
    SyntaxError) and all protocol methods collapsed onto the single name
    ``_a``; the ``ChunkPipeline`` protocol names
    (``_sanitize_parameters`` / ``preprocess`` / ``_forward`` /
    ``postprocess``) are restored here.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, """vision""")
        requires_backends(self, """torch""")

        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')

        # Bug fix: the model-type check must receive the imported mapping,
        # not the kwargs dict.
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["""points_per_batch"""] = kwargs["""points_per_batch"""]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["""points_per_crop"""] = kwargs["""points_per_crop"""]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["""crops_n_layers"""] = kwargs["""crops_n_layers"""]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["""crop_overlap_ratio"""] = kwargs["""crop_overlap_ratio"""]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["""crop_n_points_downscale_factor"""] = kwargs["""crop_n_points_downscale_factor"""]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["""pred_iou_thresh"""] = kwargs["""pred_iou_thresh"""]
        if "stability_score_offset" in kwargs:
            forward_params["""stability_score_offset"""] = kwargs["""stability_score_offset"""]
        if "mask_threshold" in kwargs:
            forward_params["""mask_threshold"""] = kwargs["""mask_threshold"""]
        if "stability_score_thresh" in kwargs:
            forward_params["""stability_score_thresh"""] = kwargs["""stability_score_thresh"""]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["""crops_nms_thresh"""] = kwargs["""crops_nms_thresh"""]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["""output_rle_mask"""] = kwargs["""output_rle_mask"""]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["""output_bboxes_mask"""] = kwargs["""output_bboxes_mask"""]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        """Generate segmentation masks for ``image``."""
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        """Embed the image once, then yield point batches for chunked inference."""
        image = load_image(image)
        target_size = self.image_processor.size["""longest_edge"""]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="""pt""")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("""pixel_values"""))
                    model_inputs["""image_embeddings"""] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
                """To return all points at once, set points_per_batch to None""")

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("""input_boxes""")
        is_last = model_inputs.pop("""is_last""")
        original_sizes = model_inputs.pop("""original_sizes""").tolist()
        reshaped_input_sizes = model_inputs.pop("""reshaped_input_sizes""").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["""pred_masks"""]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["""iou_scores"""]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        """Merge per-chunk masks, run NMS across crops, and assemble the result."""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("""iou_scores"""))
            all_masks.extend(model_output.pop("""masks"""))
            all_boxes.append(model_output.pop("""boxes"""))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)

        # Collect any remaining per-chunk fields untouched above.
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["""rle_mask"""] = rle_mask

        if output_bboxes_mask:
            optional["""bounding_boxes"""] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Module-level logger plus deterministic-mode setup for reproducible tests.
logger = logging.get_logger(__name__)

enable_full_determinism()
class _SCREAMING_SNAKE_CASE(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Smoke tests for a small 32x32 UNet2DModel (DDPM-style).

    Restores the ``ModelTesterMixin`` protocol names (``model_class``,
    ``dummy_input``, ``input_shape``, ``output_shape``,
    ``prepare_init_args_and_inputs_for_common``) that were collapsed onto a
    single obfuscated identifier, and moves tensors to the imported
    ``torch_device``.
    """

    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class _SCREAMING_SNAKE_CASE(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Tests for the 4-channel LDM-style UNet2DModel, including hub loading
    and accelerate/low_cpu_mem_usage equivalence.

    Restores the ``ModelTesterMixin`` protocol names and distinct test-method
    names (every method was previously named ``_UpperCamelCase``, so earlier
    definitions were shadowed and never run), fixes
    ``output_loading_info=True`` (the obfuscated code passed an undefined
    name) and unpacks the resulting (model, loading_info) tuple.
    """

    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['''missing_keys''']), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''', '''This test is supposed to run on GPU''')
    def test_from_pretrained_accelerate(self):
        model, _ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != '''cuda''', '''This test is supposed to run on GPU''')
    def test_from_pretrained_accelerate_wont_change_results(self):
        # By default, loading uses accelerate (low_cpu_mem_usage=True).
        model_accelerate, _ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0),)
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["""sample"""]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNetaDModel.from_pretrained(
            '''fusing/unet-ldm-dummy-update''', output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["""sample"""]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1E-3)

    def test_output_pretrained(self):
        model = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''')
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0),)
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-3))
class _SCREAMING_SNAKE_CASE( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common + slow integration tests for the NCSNpp-style ("skip"-block) UNet2D.

    NOTE(review): obfuscation collapsed most assignment targets into
    `__SCREAMING_SNAKE_CASE`; names such as `batch_size`, `num_channels`, `noise`,
    `time_step`, `output`, `init_dict` and `inputs_dict` referenced below are
    presumably those values — confirm against the original test file.
    """

    # Model class under test and the name of its main input tensor.
    SCREAMING_SNAKE_CASE_ : Optional[int] = UNetaDModel
    SCREAMING_SNAKE_CASE_ : str = 'sample'

    @property
    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=(32, 32) ) -> Union[str, Any]:
        """Dummy {"sample", "timestep"} inputs used by the shared model tests."""
        __SCREAMING_SNAKE_CASE :List[Any] = 4
        __SCREAMING_SNAKE_CASE :Dict = 3
        __SCREAMING_SNAKE_CASE :Any = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=SCREAMING_SNAKE_CASE__ )
        return {"sample": noise, "timestep": time_step}

    @property
    def _UpperCamelCase ( self ) -> Optional[int]:
        """Input shape (channels, height, width) expected by the model."""
        return (3, 32, 32)

    @property
    def _UpperCamelCase ( self ) -> Any:
        """Output shape (channels, height, width) produced by the model."""
        return (3, 32, 32)

    def _UpperCamelCase ( self ) -> List[str]:
        """Config + inputs for instantiating a small NCSNpp UNet with Fourier
        time embeddings and Skip/AttnSkip down/up blocks."""
        __SCREAMING_SNAKE_CASE :Union[str, Any] = {
            """block_out_channels""": [32, 64, 64, 64],
            """in_channels""": 3,
            """layers_per_block""": 1,
            """out_channels""": 3,
            """time_embedding_type""": """fourier""",
            """norm_eps""": 1E-6,
            """mid_block_scale_factor""": math.sqrt(2.0 ),
            """norm_num_groups""": None,
            """down_block_types""": [
                """SkipDownBlock2D""",
                """AttnSkipDownBlock2D""",
                """SkipDownBlock2D""",
                """SkipDownBlock2D""",
            ],
            """up_block_types""": [
                """SkipUpBlock2D""",
                """SkipUpBlock2D""",
                """AttnSkipUpBlock2D""",
                """SkipUpBlock2D""",
            ],
        }
        __SCREAMING_SNAKE_CASE :str = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def _UpperCamelCase ( self ) -> str:
        """Slow: load google/ncsnpp-celebahq-256 with loading info and check it
        runs on a 256x256 input without missing keys."""
        __SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ,output_loading_info=SCREAMING_SNAKE_CASE__ )
        self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
        model.to(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Optional[Any] = self.dummy_input
        __SCREAMING_SNAKE_CASE :Optional[Any] = floats_tensor((4, 3) + (2_56, 2_56) ).to(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Dict = noise
        __SCREAMING_SNAKE_CASE :Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
        assert image is not None, "Make sure output is not None"

    @slow
    def _UpperCamelCase ( self ) -> Optional[int]:
        """Slow: compare a 3x3 output patch of ncsnpp-celebahq-256 on an all-ones
        256x256 input (t = 1e-4) against recorded reference values."""
        __SCREAMING_SNAKE_CASE :List[str] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
        model.to(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Any = 4
        __SCREAMING_SNAKE_CASE :List[Any] = 3
        __SCREAMING_SNAKE_CASE :Any = (2_56, 2_56)
        __SCREAMING_SNAKE_CASE :List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :int = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE__ )
        with torch.no_grad():
            __SCREAMING_SNAKE_CASE :Optional[int] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).sample
        __SCREAMING_SNAKE_CASE :Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        __SCREAMING_SNAKE_CASE :str = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
        # fmt: on
        self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> Optional[int]:
        """Compare a 3x3 output patch of the dummy ncsnpp-ffhq checkpoint on an
        all-ones 32x32 input (t = 1e-4) against recorded reference values."""
        __SCREAMING_SNAKE_CASE :Union[str, Any] = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
        model.to(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Optional[Any] = 4
        __SCREAMING_SNAKE_CASE :Tuple = 3
        __SCREAMING_SNAKE_CASE :Optional[Any] = (32, 32)
        __SCREAMING_SNAKE_CASE :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Union[str, Any] = torch.tensor(batch_size * [1E-4] ).to(SCREAMING_SNAKE_CASE__ )
        with torch.no_grad():
            __SCREAMING_SNAKE_CASE :int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ).sample
        __SCREAMING_SNAKE_CASE :List[str] = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        __SCREAMING_SNAKE_CASE :Optional[int] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
        # fmt: on
        self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,rtol=1E-2 ) )

    def _UpperCamelCase ( self ) -> Dict:
        """Intentionally skipped for this model variant."""
        pass
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def a(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    """Solve y' = f(x, y) with the modified Euler (Heun / explicit trapezoidal) method.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        ya: initial value y(xa).
        xa: initial abscissa.
        step_size: integration step h (must be positive).
        x_end: final abscissa; integration covers [xa, x_end].

    Returns:
        1-D numpy array of the approximate solution values; index 0 holds ``ya``.

    Raises:
        ValueError: if ``step_size`` is not positive.

    >>> y = a(lambda x, u: x, 0.0, 0.0, 0.5, 1.0)
    >>> float(y[-1])
    0.5
    """
    if step_size <= 0:
        raise ValueError("step_size must be positive")
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Predictor: plain Euler estimate of y(x + h).
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both interval ends.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Build (timm_key, hf_key) pairs mapping timm DeiT weight names to
    HuggingFace DeiT weight names.

    Renamed from the obfuscated duplicate `_UpperCAmelCase` to match the call
    site in `convert_deit_checkpoint`; the duplicate `(A, A=False)` parameter
    list was a syntax error.

    Args:
        config: model config providing ``num_hidden_layers``.
        base_model: if True, emit names for the bare encoder (pooler/layernorm,
            no classification heads, "deit" prefix stripped).

    Returns:
        list of (old_name, new_name) tuples.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        # NOTE(review): [4:] strips "deit" but leaves the leading "." — preserved
        # as-is from the original; confirm against the upstream conversion script.
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused qkv projection into separate HF query/key/value
    weights and biases, writing them into ``state_dict`` in place.

    Renamed from the obfuscated duplicate `_UpperCAmelCase` to match the call
    site in `convert_deit_checkpoint`; in the obfuscated version the q/k/v
    slices were computed and discarded instead of being stored.

    Args:
        state_dict: checkpoint dict, mutated in place.
        config: model config providing ``num_hidden_layers`` and ``hidden_size``.
        base_model: if True, omit the "deit." key prefix.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Renamed from the obfuscated duplicate `_UpperCAmelCase` to match the call
    site in `convert_deit_checkpoint`; the obfuscated version popped the value
    and discarded it instead of re-inserting under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check conversions.

    Renamed from the obfuscated `_UpperCAmelCase` to match the call site in
    `convert_deit_checkpoint`. ``stream=True`` restores the undefined
    ``stream=lowerCamelCase__`` so PIL can read directly from the response body.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HuggingFace format and
    save model + image processor to ``pytorch_dump_folder_path``.

    Renamed from the obfuscated `_UpperCAmelCase` to match the `__main__` call
    site; the duplicate `(A, A)` parameter list was a syntax error and every
    local had been mangled so the body referenced unbound names.

    Args:
        deit_name: timm model name, e.g. "vit_deit_base_distilled_patch16_224".
        pytorch_dump_folder_path: output directory (created if missing).
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    # patch size and image size are encoded at the end of the timm name.
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # base is the DeiTConfig default
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    # Converted model must reproduce the timm logits.
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): obfuscation collapsed the `parser`/`args` targets into
    # `UpperCamelCase_`; the later references use `parser` and `args` — confirm
    # against the original conversion script.
    UpperCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--deit_name',
        default='vit_deit_base_distilled_patch16_224',
        type=str,
        help='Name of the DeiT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    UpperCamelCase_ = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 625 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a checkpoint name like 'mobilenet_v1_1.0_224'.

    Renamed from the obfuscated `a` to match the call site in
    `convert_movilevit_checkpoint`; mangled locals had left `model_name`,
    `matches` and `idalabel` unbound.

    Args:
        model_name: checkpoint name encoding depth multiplier and image size.

    Returns:
        populated MobileNetVaConfig.

    Raises:
        ValueError: for quantized checkpoints, which are not supported.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    """Download the standard COCO cats test image used to verify conversions.

    Renamed from the obfuscated `a` to match the call site in
    `convert_movilevit_checkpoint`. ``stream=True`` restores the undefined
    ``stream=lowerCamelCase__`` so PIL can read directly from the response body.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TensorFlow MobileNetV1 checkpoint to the HuggingFace format,
    verify its logits, and save (optionally push) model + image processor.

    The obfuscated version declared four parameters all named
    `lowerCamelCase__` (a syntax error) and discarded every intermediate value.

    Args:
        model_name: e.g. "mobilenet_v1_1.0_224".
        checkpoint_path: path to the original TensorFlow .ckpt file.
        pytorch_dump_folder_path: output directory (created if missing).
        push_to_hub: if True, also push the converted artifacts to the Hub.
    """
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size},
        size={"""shortest_edge""": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    # Reference logits recorded from the original TF checkpoints.
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # NOTE(review): obfuscation collapsed the `parser`/`args` targets into
    # `lowerCamelCase`; the later references use `parser` and `args` — confirm
    # against the original conversion script.
    lowerCamelCase :Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''mobilenet_v1_1.0_224''',
        type=str,
        help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    lowerCamelCase :str = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a_ : Optional[int] = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results):
    """Parse a pytest summary line into (failed, success, time_spent).

    Renamed from the obfuscated `lowerCamelCase__` to match the `__main__` call
    site; the mangled locals had left `failed`/`success`/`time_spent` unbound.

    Args:
        test_results: summary string such as "2 failed, 3 passed in 10.5s".

    Returns:
        tuple (failed_count, passed_count, time_spent_token).
    """
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    """From pytest's "failures short" output, map each failing doctest file to
    the first line of its error message.

    Renamed from the obfuscated `lowerCamelCase__` to match the `__main__` call
    site; the mangled locals had left `failures`/`file`/`in_error` unbound.

    Args:
        failures_short_lines: the raw multi-line failures-short report.

    Returns:
        dict mapping file name (third token of the doctest header line) to the
        first non-line-numbered error line that follows it.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n'):
        if re.search(R'_ \[doctest\]', line):
            in_error = True
            file = line.split(' ')[2]
        elif in_error and not line.split(' ')[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class _snake_case :
    """Builds and posts the Slack report for the daily doc-test run.

    NOTE(review): obfuscation collapsed distinct assignment targets into the
    single name `SCREAMING_SNAKE_CASE` and produced duplicate parameter lists
    (`a , a`), which is a syntax error; names such as `title`,
    `doc_test_results`, `time_parts`, `hours`/`minutes`/`seconds`, `blocks`,
    `failures_text`, `content` referenced below are presumably the intended
    targets — confirm against the original notification service script. This
    class is instantiated elsewhere as `Message`.
    """

    def __init__( self , a , a) -> Tuple:
        SCREAMING_SNAKE_CASE = title
        # Keep only the wall-clock portion before the first comma.
        SCREAMING_SNAKE_CASE = doc_test_results["""time_spent"""].split(',')[0]
        SCREAMING_SNAKE_CASE = doc_test_results["""success"""]
        SCREAMING_SNAKE_CASE = doc_test_results["""failures"""]
        SCREAMING_SNAKE_CASE = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        SCREAMING_SNAKE_CASE = doc_test_results

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        """Total run time of the suite formatted as `XhYmZs`."""
        SCREAMING_SNAKE_CASE = [self._time_spent]
        SCREAMING_SNAKE_CASE = 0
        for time in time_spent:
            SCREAMING_SNAKE_CASE = time.split(':')
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(a) == 1:
                SCREAMING_SNAKE_CASE = [0, 0, time_parts[0]]
            SCREAMING_SNAKE_CASE = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        SCREAMING_SNAKE_CASE = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f'''{int(a)}h{int(a)}m{int(a)}s'''

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
        """Slack header block carrying the report title."""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        """Slack section block used when every test passed."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
            },
        }

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        """Slack section block summarising the overall failure count."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
                    f''' {self.time}.'''
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
            },
        }

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
        """Slack section block listing failures grouped by category."""
        SCREAMING_SNAKE_CASE = 40
        SCREAMING_SNAKE_CASE = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(a , a)}
        SCREAMING_SNAKE_CASE = """"""
        for category, failures in category_failures.items():
            if len(a) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'''*{category} failures*:'''.ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(a)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f'''The following examples had failures:\n\n\n{report}\n''',
            },
        }

    @property
    def SCREAMING_SNAKE_CASE__ ( self) -> str:
        """Complete JSON payload for the main Slack message."""
        SCREAMING_SNAKE_CASE = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(a)

    @staticmethod
    def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
        """Post a fallback Slack message when the test run itself errored out."""
        SCREAMING_SNAKE_CASE = [
            {
                """type""": """section""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """There was an issue running the tests.""",
                },
                """accessory""": {
                    """type""": """button""",
                    """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
                    """url""": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
                },
            }
        ]
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(a)}))
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=a , )

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        """Post the main report message to the daily CI Slack channel."""
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(self.payload)}))
        SCREAMING_SNAKE_CASE = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
        SCREAMING_SNAKE_CASE = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=a , )

    def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a) -> List[str]:
        """Build the Slack blocks for one job's threaded failure reply."""
        SCREAMING_SNAKE_CASE = """"""
        for key, value in failures.items():
            # Truncate very long error messages so Slack accepts the payload.
            SCREAMING_SNAKE_CASE = value[:200] + """ [Truncated]""" if len(a) > 250 else value
            failures_text += f'''*{key}*\n_{value}_\n\n'''
        SCREAMING_SNAKE_CASE = job_name
        SCREAMING_SNAKE_CASE = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
        if job_link is not None:
            SCREAMING_SNAKE_CASE = {
                """type""": """button""",
                """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
                """url""": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        """Post one threaded reply per job with its failure details."""
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')
        SCREAMING_SNAKE_CASE = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')
        SCREAMING_SNAKE_CASE = sorted(self.doc_test_results.items() , key=lambda a: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                SCREAMING_SNAKE_CASE = f'''*Num failures* :{len(job_result['failed'])} \n'''
                SCREAMING_SNAKE_CASE = job_result["""failures"""]
                SCREAMING_SNAKE_CASE = self.get_reply_blocks(a , a , a , text=a)
                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'''Results for {job}''' , blocks=a , thread_ts=self.thread_ts['ts'] , )
                # Slack rate limit: pause briefly between per-job replies.
                time.sleep(1)
def get_job_links():
    """Fetch {job_name: html_url} for every job of the current GitHub Actions run.

    Renamed from the obfuscated `lowerCamelCase__` to match the `__main__` call
    site; mangled locals had left `url`, `result`, `jobs` and the page count
    unbound. Pages through the API 100 jobs at a time.

    Returns:
        dict of job name to job URL; empty dict on any error.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"""&page={i + 2}""").json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        return jobs
    except Exception as e:
        # Best-effort: the report can still be posted without job links.
        print('Unknown error, could not fetch links.' , e)
    return {}
def retrieve_artifact(name):
    """Read every file of a downloaded artifact directory into a dict keyed by
    the file's base name (extension stripped).

    Renamed from the obfuscated `lowerCamelCase__` to match the `__main__` call
    site; the obfuscated version read each file and discarded the contents.

    Args:
        name: path to the artifact directory; missing paths yield {}.

    Returns:
        dict mapping base file name to its text content.

    Raises:
        ValueError: if a file cannot be decoded as UTF-8.
    """
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name , file) , encoding='utf-8') as f:
                    _artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'''Could not open {os.path.join(name , file)}.''') from e
    return _artifact
def retrieve_available_artifacts():
    """Scan the current working directory for downloaded artifact folders.

    Renamed from the obfuscated `lowerCamelCase__` to match the `__main__` call
    site; the inner class was obfuscated to `_snake_case` while being
    instantiated as `Artifact`, and the mangled locals left `artifact_name`
    unbound.

    Returns:
        dict mapping directory name to an `Artifact` holding its paths.
    """

    class Artifact:
        # Lightweight record of one artifact and the paths found for it.
        def __init__(self, name) -> Optional[Any]:
            self.name = name
            self.paths = []

        def __str__(self) -> int:
            return self.name

        def add_path(self, path) -> Tuple:
            self.paths.append({'name': self.name, 'path': path})

    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): obfuscation collapsed every assignment target into `a_`;
    # the names referenced afterwards (`github_actions_job_links`,
    # `available_artifacts`, `docs`, `doc_test_results`, `artifact_path`,
    # `artifact`, `failed`/`success`/`time_spent`, `all_failures`, `line`,
    # `file_path`/`test`, `category`, `failure`, `message`) are presumably the
    # intended targets — confirm against the original notification script.
    a_ : Optional[Any] = get_job_links()
    a_ : int = retrieve_available_artifacts()
    # Map doc-test file patterns to report categories.
    a_ : int = collections.OrderedDict(
        [
            ('*.py', 'API Examples'),
            ('*.md', 'MD Examples'),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    a_ : Dict = {
        v: {
            '''failed''': [],
            '''failures''': {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    a_ : Union[str, Any] = github_actions_job_links.get('run_doctests')
    a_ : Dict = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    a_ : Optional[Any] = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        # Overall counters from the pytest stats line.
        a_ : Any = handle_test_results(artifact['stats'])
        a_ : List[str] = failed
        a_ : List[str] = success
        a_ : Union[str, Any] = time_spent[1:-1] + ''', '''
        a_ : Optional[int] = extract_first_line_failure(artifact['failures_short'])
        # Attribute each FAILED entry of the short summary to its category.
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                a_ : int = line.replace('FAILED ', '')
                a_ : Dict = line.split()[0].replace('\n', '')
                if "::" in line:
                    a_ : Optional[Any] = line.split('::')
                else:
                    a_ : str = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        a_ : List[Any] = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        a_ : List[str] = all_failures[test] if test in all_failures else '''N/A'''
                        a_ : Optional[Any] = failure
                        break
    # Post the main report and the per-job threaded replies.
    a_ : int = Message('🤗 Results of the doc tests.', doc_test_results)
    message.post()
    message.post_reply()
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Processor wrapping a tokenizer plus optional preloaded speaker-embedding
    (voice preset) dictionaries for the Bark TTS model.

    NOTE(review): obfuscation produced duplicate parameter names (`lowercase`)
    in several signatures — a syntax error — and collapsed assignment targets
    into `A_`; names such as `speaker_embeddings`, `speaker_embeddings_path`,
    `tokenizer`, `embeddings_dict`, `voice_preset`, `voice_preset_paths`,
    `path`, `tmp_dict`, `sorted_args` referenced below are presumably the
    intended targets — confirm against the original Bark processor.
    """

    # Tokenizer class ProcessorMixin should auto-load, and the attributes it manages.
    __SCREAMING_SNAKE_CASE : Dict = 'AutoTokenizer'
    __SCREAMING_SNAKE_CASE : Union[str, Any] = ['tokenizer']

    # Expected ndarray rank for each component of a voice preset.
    __SCREAMING_SNAKE_CASE : Tuple = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__(self , lowercase , lowercase=None ):
        super().__init__(lowercase )
        A_ : Any = speaker_embeddings

    @classmethod
    def _a (cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ):
        """Load the processor, optionally resolving a JSON dictionary of
        speaker-embedding file paths from the repo/directory."""
        if speaker_embeddings_dict_path is not None:
            A_ : Any = get_file_from_repo(
                lowercase , lowercase , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
            if speaker_embeddings_path is None:
                # Missing embeddings file is non-fatal: warn and continue without presets.
                logger.warning(
                    F'`{os.path.join(lowercase , lowercase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
                A_ : str = None
            else:
                with open(lowercase ) as speaker_embeddings_json:
                    A_ : List[str] = json.load(lowercase )
        else:
            A_ : str = None
        A_ : int = AutoTokenizer.from_pretrained(lowercase , **lowercase )
        return cls(tokenizer=lowercase , speaker_embeddings=lowercase )

    def _a (self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
        """Save tokenizer and, when present, every voice preset as .npy files
        plus a JSON index of their relative paths."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(lowercase , lowercase , """v2""" ) , exist_ok=lowercase )
            A_ : Optional[int] = {}
            A_ : Tuple = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    A_ : Union[str, Any] = self._load_voice_preset(lowercase )
                    A_ : Tuple = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        # Persist each prompt component as its own .npy file.
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""] , lowercase , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowercase , )
                        A_ : List[str] = os.path.join(lowercase , F'{prompt_key}_{key}.npy' )
                    A_ : str = tmp_dict
            with open(os.path.join(lowercase , lowercase ) , """w""" ) as fp:
                json.dump(lowercase , lowercase )
        super().save_pretrained(lowercase , lowercase , **lowercase )

    def _a (self , lowercase = None , **lowercase ):
        """Resolve and load the .npy arrays of one named voice preset."""
        A_ : List[Any] = self.speaker_embeddings[voice_preset]
        A_ : Optional[Any] = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            A_ : int = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , lowercase ) , cache_dir=kwargs.pop("""cache_dir""" , lowercase ) , force_download=kwargs.pop("""force_download""" , lowercase ) , proxies=kwargs.pop("""proxies""" , lowercase ) , resume_download=kwargs.pop("""resume_download""" , lowercase ) , local_files_only=kwargs.pop("""local_files_only""" , lowercase ) , use_auth_token=kwargs.pop("""use_auth_token""" , lowercase ) , revision=kwargs.pop("""revision""" , lowercase ) , )
            if path is None:
                raise ValueError(
                    F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
            A_ : Tuple = np.load(lowercase )
        return voice_preset_dict

    def _a (self , lowercase = None ):
        """Validate that a voice-preset dict has every prompt key and each value
        is an ndarray of the expected rank (see `preset_shape`)."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )

    def __call__(self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ):
        """Tokenize the text and, when a voice preset is given (as name, .npz
        path, or dict), load/validate it and attach it to the encoding."""
        if voice_preset is not None and not isinstance(lowercase , lowercase ):
            if (
                isinstance(lowercase , lowercase )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                A_ : Optional[int] = self._load_voice_preset(lowercase )
            else:
                # Treat the preset as a (possibly extension-less) .npz file path.
                if isinstance(lowercase , lowercase ) and not voice_preset.endswith(""".npz""" ):
                    A_ : Optional[int] = voice_preset + """.npz"""
                A_ : Any = np.load(lowercase )
        if voice_preset is not None:
            self._validate_voice_preset_dict(lowercase , **lowercase )
        A_ : Optional[int] = BatchFeature(data=lowercase , tensor_type=lowercase )
        A_ : Any = self.tokenizer(
            lowercase , return_tensors=lowercase , padding="""max_length""" , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , )
        if voice_preset is not None:
            A_ : Union[str, Any] = voice_preset
        return encoded_text
from __future__ import annotations
from math import pow, sqrt
def a_ ( resistance , reactance , impedance ) -> dict:
    """Solve the electrical-impedance relation Z^2 = R^2 + X^2 for the one
    quantity given as 0.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned as a single-entry dict.

    The original declared three parameters all named ``_A`` (a SyntaxError)
    and computed with an undefined name ``lowerCamelCase__``.

    Raises:
        ValueError: if the number of zero arguments is not exactly one.
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        # Unreachable after the count() guard; kept for structural parity.
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 328 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for InstructBlipProcessor: construction, save/load round-trips, and
    the shape of its tokenizer / image-processor outputs.

    The mangled original named every method `_a`, so later definitions shadowed
    earlier ones and helper calls such as `self.get_tokenizer()` raised
    AttributeError; the canonical unittest method names are restored here.
    """

    def setUp(self ):
        # Build a processor from tiny test checkpoints and save it for reload tests.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        qformer_tokenizer = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )

    def get_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer

    def get_image_processor(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def get_qformer_tokenizer(self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self ):
        """Return a list with one random PIL image."""
        # np.uinta does not exist; the intended dtype is np.uint8.
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        return [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]

    def test_save_load_pretrained_additional_features(self ):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )

    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        # The processor omits token_type_ids; compare tokenizer output without them.
        encoded_tokens = tokenizer(input_str , return_token_type_ids=None )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=None )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )

    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
"""simple docstring"""
def _snake_case ( _snake_case : List[Any] ) -> Optional[int]:
'''simple docstring'''
return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') )
def _snake_case ( _snake_case : Any ) -> Tuple:
'''simple docstring'''
_A = credit_card_number
_A = 0
_A = len(lowerCamelCase__ ) - 2
for i in range(lowerCamelCase__ , -1 , -2 ):
# double the value of every second digit
_A = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
_A = cc_number[:i] + str(lowerCamelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCamelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def _snake_case ( credit_card_number: str ) -> bool:
    """Fully validate a credit card number (charset, length, prefix, Luhn
    check), printing a verdict and returning True only when all checks pass.

    NOTE(review): this relies on module-level helpers `validate_initial_digits`
    and `luhn_validation`; in this mangled file the helpers above are all named
    `_snake_case` — confirm the intended symbols before running.
    """
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'''{error_message} of its length.''' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'''{error_message} of its first two digits.''' )
        return False
    if not luhn_validation(credit_card_number ):
        print(F'''{error_message} it fails the Luhn check.''' )
        return False
    print(F'''{credit_card_number} is a valid credit card number.''' )
    return True
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo runs: one valid and one invalid card number.
    # NOTE(review): `validate_credit_card_number` is not defined under that name
    # in this file (the defs above are all `_snake_case`) — confirm the symbol.
    validate_credit_card_number('''4111111111111111''')
    validate_credit_card_number('''32323''')
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): both assignments below bind the same name `lowerCamelCase`, so
# the logger is immediately shadowed by the checkpoint map — confirm intent.
lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config.json URL.
lowerCamelCase :Tuple = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Configuration for an MGP-STR model; stores every architecture
    hyper-parameter on the instance.

    The mangled original declared all `__init__` parameters as `lowercase`
    (duplicate argument names are a SyntaxError) and bound each value to a
    throwaway local `A_`, silently discarding it; the canonical parameter
    names are restored and the values are persisted as attributes.
    """

    __SCREAMING_SNAKE_CASE : List[Any] = 'mgp-str'

    def __init__(
        self ,
        image_size=[32, 128] ,  # mutable-list default kept for interface parity
        patch_size=4 ,
        num_channels=3 ,
        max_token_length=27 ,
        num_character_labels=38 ,
        num_bpe_labels=50257 ,
        num_wordpiece_labels=30522 ,
        hidden_size=768 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        mlp_ratio=4.0 ,
        qkv_bias=True ,
        distilled=False ,
        layer_norm_eps=1E-5 ,
        drop_rate=0.0 ,
        attn_drop_rate=0.0 ,
        drop_path_rate=0.0 ,
        output_aa_attentions=False ,
        initializer_range=0.02 ,
        **kwargs ,
    ):
        super().__init__(**kwargs )
        # Persist every hyper-parameter (assignment order mirrors the original).
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): every constant below is bound to the same name `_UpperCAmelCase`,
# so each assignment shadows the previous one (logger, "▁", vocab-file names,
# pretrained maps, sizes) — confirm the intended distinct names.
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# SentencePiece's word-boundary meta symbol.
_UpperCAmelCase : Union[str, Any] = "▁"
# Expected vocabulary file name inside a checkpoint directory.
_UpperCAmelCase : str = {"vocab_file": "spiece.model"}
# Checkpoint name -> hosted SentencePiece model URL.
_UpperCAmelCase : List[str] = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
# Maximum model input sizes per checkpoint.
_UpperCAmelCase : int = {
    "google/reformer-crime-and-punishment": 524_288,
}
class lowerCAmelCase_ ( snake_case__ ):
    """SentencePiece tokenizer for Reformer.

    Restored from a mangled original in which every `__init__` parameter was
    named `SCREAMING_SNAKE_CASE_` (duplicate arguments are a SyntaxError) and
    every method was named `__snake_case`, so later definitions shadowed
    earlier ones; the canonical tokenizer method names are restored.
    """

    UpperCamelCase_ :Tuple = VOCAB_FILES_NAMES
    UpperCamelCase_ :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ :List[Any] = ['input_ids', 'attention_mask']

    def __init__(
        self ,
        vocab_file ,
        eos_token="</s>" ,
        unk_token="<unk>" ,
        additional_special_tokens=[] ,  # mutable default kept for interface parity
        sp_model_kwargs: Optional[Dict[str, Any]] = None ,
        **kwargs ,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size(self ):
        # Size of the underlying SentencePiece vocabulary.
        return self.sp_model.get_piece_size()

    def get_vocab(self ):
        """Return the token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ):
        # The C++ SentencePiece processor is not picklable; drop it here and
        # reload it from the vocab file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self , text: str ):
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token(self , index ):
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index )
        # The original fell through and crashed with UnboundLocalError on
        # out-of-range ids; fail with a clear error instead.
        raise IndexError(f'id {index} is out of the SentencePiece vocabulary range' )

    def convert_tokens_to_string(self , tokens ):
        """Join pieces back into a string, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Copy (or re-serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 668 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all constants below bind the same name `_UpperCAmelCase`, so each
# assignment shadows the previous one — confirm the intended distinct names.
_UpperCAmelCase : Dict = logging.get_logger(__name__)
# Expected vocabulary file name inside a checkpoint directory.
_UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
# Checkpoint name -> hosted SentencePiece model URL.
_UpperCAmelCase : List[Any] = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
# Maximum model input sizes per checkpoint.
_UpperCAmelCase : Union[str, Any] = {
    "camembert-base": 512,
}
# SentencePiece's word-boundary meta symbol.
_UpperCAmelCase : Dict = "▁"
class lowerCAmelCase_ ( snake_case__ ):
    """SentencePiece (BPE) tokenizer for CamemBERT.

    Restored from a mangled original whose duplicate `SCREAMING_SNAKE_CASE_`
    parameter names (and a duplicated keyword in the `super()` call) were
    SyntaxErrors, and whose identically-named `__snake_case` methods shadowed
    one another; the canonical method and parameter names are restored.
    """

    UpperCamelCase_ :int = VOCAB_FILES_NAMES
    UpperCamelCase_ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ :Dict = ['input_ids', 'attention_mask']

    def __init__(
        self ,
        vocab_file ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        sep_token="</s>" ,
        cls_token="<s>" ,
        unk_token="<unk>" ,
        pad_token="<pad>" ,
        mask_token="<mask>" ,
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] ,  # mutable default kept for parity
        sp_model_kwargs: Optional[Dict[str, Any]] = None ,
        **kwargs ,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Add <s> ... </s> (and </s></s> between a pair) around the sequence(s)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """CamemBERT does not use token types; return zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size(self ):
        # fairseq offset tokens plus the SentencePiece vocabulary.
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )

    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize(self , text: str ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )

    def _convert_id_to_token(self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string(self , tokens ):
        """Join pieces back into a string, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def __getstate__(self ):
        # The C++ SentencePiece processor is not picklable; reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Copy (or re-serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 668 | 1 |
from __future__ import annotations
import math
def lowerCAmelCase_ (u: float , p: int ) -> float:
    """Return u * (u-1) * ... * (u-p+1), the falling-factorial-style product
    used by Newton's forward-difference interpolation.

    The original declared both parameters as `lowercase__` (duplicate argument
    names are a SyntaxError) while the body read `u`; the canonical parameter
    names are restored.
    """
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def lowerCAmelCase_ () -> None:
    """Interactively run Newton's forward-difference interpolation: read n
    sample points from stdin, build the forward-difference table, and print
    the interpolated value.

    The original read the undefined name `lowercase__` everywhere; the loop
    bounds and table indices are restored.
    """
    n = int(input('''enter the numbers of values: ''' ) )
    # y is an n x n forward-difference table, initialised to zeros.
    y = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        # NOTE(review): `ucal` must be the module-level product helper; in this
        # mangled file that helper is also named `lowerCAmelCase_` — confirm.
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'the value at {value} is {summ}' )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file (the
    # entry point above is named `lowerCAmelCase_`) — confirm the symbol.
    main()
| 668 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_UpperCAmelCase : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
    """Arguments for the TabFact data pipeline (dataset selection, truncation,
    padding, and train/validation/test file paths).

    The mangled original named every field `UpperCamelCase_` — in a dataclass
    duplicate names collapse to a single field — and used the undefined default
    `snake_case__` (NameError at import). Field names are restored to match
    their use as `data_args.*` in `main()`, and the validation that lived in a
    dead `__snake_case` method is restored as `__post_init__`.
    """

    dataset_name: Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
    max_seq_length: int = field(
        default=1024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
    train_file: Optional[str] = field(
        default=None , metadata={'help': 'A csv or a json file containing the training data.'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'A csv or a json file containing the validation data.'} )
    test_file: Optional[str] = field(default=None , metadata={'help': 'A csv or a json file containing the test data.'} )

    def __post_init__(self ):
        # Require either a dataset name, or matching train/validation files.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
        else:
            train_extension = self.train_file.split('''.''' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('''.''' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
    """Arguments identifying which pretrained model/config/tokenizer to load.

    Field names restored to match their use as `model_args.*` in `main()`;
    the undefined `snake_case__` defaults (NameError at import) are replaced
    with working values.
    """

    model_name_or_path: Optional[str] = field(
        default=None , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    # presumably True upstream (fast tokenizers are the library default) — TODO confirm
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
def lowerCAmelCase_ () -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase__ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase__ = data_args.train_file.split('''.''' )[-1]
lowerCAmelCase__ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase__ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCAmelCase__ = load_dataset('''csv''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase__ = load_dataset('''json''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase__ = raw_datasets['''train'''].features['''label'''].names
lowerCAmelCase__ = len(lowercase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase__ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase__ , )
lowerCAmelCase__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase__ = {'''Refused''': 0, '''Entailed''': 1}
lowerCAmelCase__ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
lowerCAmelCase__ = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase__ : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase__ : Dict ):
lowerCAmelCase__ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCAmelCase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowerCAmelCase__ = examples['''statement''']
lowerCAmelCase__ = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowerCAmelCase__ = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
lowerCAmelCase__ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCAmelCase__ = raw_datasets.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase__ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase__ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCAmelCase__ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCAmelCase__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase__ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase__ : EvalPrediction ):
lowerCAmelCase__ = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ = default_data_collator
elif training_args.fpaa:
lowerCAmelCase__ = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ = None
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowercase__ )
lowerCAmelCase__ = train_result.metrics
lowerCAmelCase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , lowercase__ )
trainer.save_metrics('''train''' , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ = trainer.evaluate(eval_dataset=lowercase__ )
lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('''eval''' , lowercase__ )
trainer.save_metrics('''eval''' , lowercase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCAmelCase__ = predict_dataset.remove_columns('''label''' )
lowerCAmelCase__ = trainer.predict(lowercase__ , metric_key_prefix='''predict''' ).predictions
lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
lowerCAmelCase__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowercase__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowercase__ ):
lowerCAmelCase__ = label_list[item]
writer.write(f'{index}\t{item}\n' )
lowerCAmelCase__ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Dict:
    """Launcher shim: ignore the argument and delegate to ``main()``.

    NOTE(review): the argument is unused and nothing is returned despite the
    ``Dict`` annotation (``main()``'s result is discarded) — this looks like an
    ``_mp_fn(index)`` entry point for ``xla_spawn``; confirm against the
    original script.
    """
    main()
# Script entry point: run the train/eval/predict pipeline defined above.
if __name__ == "__main__":
    main()
| 668 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCAmelCase_ (lowercase__ ):
    """Load a T5X checkpoint from the path ``lowercase__`` and return its
    parameters flattened into a single-level dict (keys are tuples of path
    components, as produced by ``flatten_dict``).

    Bug fix: the original loaded the checkpoint into a throwaway name, then
    flattened the *path* argument instead of the loaded parameter tree, and
    returned the undefined name ``flax_params``.  The unresolvable ``Dict`` /
    ``List[str]`` annotations (never imported here) are dropped as well.
    """
    flax_params = checkpoints.load_tax_checkpoint(lowercase__)
    return flatten_dict(flax_params)
def lowerCAmelCase_ (lowercase__ ):
    """Rename flattened T5X/Flax Pix2Struct parameters to Hugging Face PyTorch
    names and convert the arrays to ``torch.Tensor``.

    ``lowercase__`` is a flat mapping whose keys are tuples of path components
    (the output of ``flatten_dict``).  Only entries under the ``"target"``
    prefix are kept; weight kernels are transposed (embedding tables are not),
    matching the PyTorch ``nn.Linear`` layout.

    Bug fix: in the obfuscated original every intermediate was assigned to one
    throwaway name, so the mapping tables, ``new_key``, ``converted_dict`` and
    ``converted_torch_dict`` that the body reads were all undefined.
    """
    converted_dict = {}
    # Renames applied to every parameter path.
    conversion_mapping = {
        '''token_embedder''': '''embeddings''',
        '''encoder_norm''': '''layernorm''',
        '''kernel''': '''weight''',
        '''.out''': '''.output''',
        '''scale''': '''weight''',
        '''embedders_0.pos_embedding''': '''row_embedder.weight''',
        '''embedders_1.pos_embedding''': '''column_embedder.weight''',
    }
    # Additional renames applied only to decoder parameters.
    decoder_conversion_mapping = {
        '''query''': '''attention.query''',
        '''key''': '''attention.key''',
        '''value''': '''attention.value''',
        '''output.dense''': '''output''',
        '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
        '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
        '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
        '''mlp.''': '''mlp.DenseReluDense.''',
        '''pre_mlp_layer_norm''': '''mlp.layer_norm''',
        '''self_attention.o''': '''self_attention.attention.o''',
        '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
        '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
        '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
    }
    for key in lowercase__.keys():
        if "target" in key:
            # Drop the leading "target" component and join the rest with dots.
            new_key = '''.'''.join(key[1:])
            # Rename the key.
            for old, new in conversion_mapping.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in decoder_conversion_mapping.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # Encoder layers: layers_<n> -> layer.<n>, nested under encoder.encoder.
                new_key = re.sub(r'''layers_(\d+)''', r'''layer.\1''', new_key)
                new_key = new_key.replace('''encoder''', '''encoder.encoder''')
            elif "layers" in new_key and "decoder" in new_key:
                # Decoder layers only get the layer-number rewrite.
                new_key = re.sub(r'''layers_(\d+)''', r'''layer.\1''', new_key)
            converted_dict[new_key] = lowercase__[key]
    converted_torch_dict = {}
    # Convert converted_dict into torch format; transpose everything except
    # embedding tables.
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def lowerCAmelCase_ (t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    """Convert a T5X Pix2Struct checkpoint into a Hugging Face PyTorch
    checkpoint and save model + processor to ``pytorch_dump_folder_path``.

    Bug fixes relative to the obfuscated original:
    * all four parameters shared one name (a SyntaxError) — renamed to match
      the CLI flags below;
    * every intermediate was bound to a single throwaway name, leaving
      ``encoder_config``/``decoder_config``/``model``/``processor`` undefined
      where read;
    * ``os.makedirs`` received the path as its ``exist_ok`` argument.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=15_36, d_ff=39_68, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=15_36, d_ff=39_68, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = PixaStructForConditionalGeneration(config)
    torch_state_dict = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_state_dict)
    # Load tokenizer and build the composite processor.
    tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        # NOTE(review): the assignment targets were lost in obfuscation; the
        # values (4096, True) presumably configure the image processor's patch
        # budget and VQA mode for the large variant — confirm upstream.
        image_processor.max_patches = 40_96
        image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print('''Model saved in {}'''.format(pytorch_dump_folder_path))
# CLI wrapper around the conversion routine above.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    # Bug fix: the parser/parse result were bound to throwaway names while the
    # body read ``parser``/``args``, and argparse derives the attribute from
    # the option string, so the value lives at ``args.t5x_checkpoint_path``
    # (the original read a non-existent ``args.tax_checkpoint_path``).
    # NOTE(review): the callee name below is not defined in this file — the
    # converter above lost its name to obfuscation; confirm the target.
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 668 |
def lowerCAmelCase_ (number: float , digit_amount: int ) -> float:
    """Return the fractional (decimal) part of ``number``.

    The integer part is removed by truncation toward zero (``int()``), so the
    result keeps the sign of the input (e.g. -14.789 -> -0.789).

    :param number: value whose decimal part is wanted.
    :param digit_amount: if > 0, round the result to this many decimal places;
        otherwise return the raw floating-point difference.
    :return: the (optionally rounded) fractional part of ``number``.
    """
    # Bug fix: the original declared both parameters with the same name
    # (``lowercase__``), which is a SyntaxError; they are renamed here.
    fraction = number - int(number)
    if digit_amount > 0:
        return round(fraction, digit_amount)
    return fraction
if __name__ == "__main__":
    # Demo of isolating the decimal part at various precisions, including the
    # sign-preserving behaviour on negative inputs and digit_amount == 0.
    # NOTE(review): ``decimal_isolate`` is not defined in this file — the
    # function above was obfuscated to ``lowerCAmelCase_`` — so running this
    # block as-is raises NameError.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 668 | 1 |
def lowerCAmelCase_ (lowercase__ : int = 60_08_51_47_51_43 ) -> int:
    """Return the largest prime factor of ``lowercase__`` (Project Euler #3).

    Uses trial division up to sqrt(n); each factor is divided out completely
    before moving on, so only primes are ever recorded.

    :raises TypeError: if the argument cannot be converted with ``int()``.
    :raises ValueError: if the value is not >= 1.
    """
    try:
        # Bug fix: the converted value was discarded in the obfuscated
        # original (assigned to a throwaway name while the body read ``n``).
        n = int(lowercase__)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    largest = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            largest = i  # i is prime here: all smaller factors were divided out
            n //= i
        i += 1
    if n > 1:
        # Whatever remains after removing all factors <= sqrt(n) is prime.
        largest = n
    return int(largest)
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined here — the Project Euler
    # solver above was obfuscated to ``lowerCAmelCase_`` — so this line
    # raises NameError as written.
    print(F'''{solution() = }''')
| 668 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase_ :
    """Helper that builds tiny Funnel configs/inputs and checks each TF head.

    NOTE(review): obfuscation destroyed this class. All ``__init__`` parameters
    share the single name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError), every
    ``self.<attr> = ...`` assignment collapsed onto the throwaway local
    ``lowerCAmelCase__`` (so ``self.batch_size`` etc. are never set), all check
    methods share the name ``__snake_case`` (each def shadows the previous),
    and the sibling test classes refer to this class as ``TFFunnelModelTester``.
    Restore the upstream names before use.
    """

    def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=99 , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2] , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : int=37 , SCREAMING_SNAKE_CASE_ : str="gelu_new" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=False , ):
        # NOTE(review): RHS names (parent, batch_size, ...) are the upstream
        # parameter names; they are undefined here because every parameter was
        # renamed to SCREAMING_SNAKE_CASE_ by the obfuscation.
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = seq_length
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_input_mask
        lowerCAmelCase__ = use_token_type_ids
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = block_sizes
        lowerCAmelCase__ = num_decoder_layers
        lowerCAmelCase__ = d_model
        lowerCAmelCase__ = n_head
        lowerCAmelCase__ = d_head
        lowerCAmelCase__ = d_inner
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = hidden_dropout
        lowerCAmelCase__ = attention_dropout
        lowerCAmelCase__ = activation_dropout
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = type_vocab_size
        lowerCAmelCase__ = 2
        lowerCAmelCase__ = num_labels
        lowerCAmelCase__ = num_choices
        lowerCAmelCase__ = scope
        lowerCAmelCase__ = initializer_std
        # Used in the tests to check the size of the first attention layer
        lowerCAmelCase__ = n_head
        # Used in the tests to check the size of the first hidden state
        lowerCAmelCase__ = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            lowerCAmelCase__ = self.num_hidden_layers + 2

    # Build a minimal FunnelConfig plus random input tensors/labels.
    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        if self.use_token_type_ids:
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    # Check TFFunnelModel output shapes with dict, list and tensor inputs,
    # then again with token type ids / input mask disabled in the config.
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ):
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )

    # Same checks for TFFunnelBaseModel (pooled 2- or 3-token outputs).
    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )

    # Pretraining head: per-token binary logits.
    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , ):
        lowerCAmelCase__ = TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )

    # Masked-LM head: per-token vocab logits.
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , ):
        lowerCAmelCase__ = TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Sequence-classification head: one logit vector per example.
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , ):
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Multiple-choice head: inputs tiled across the choice dimension.
    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        lowerCAmelCase__ = self.num_choices
        lowerCAmelCase__ = TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Token-classification head: per-token label logits.
    def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , ):
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Question-answering head: start/end logits per token.
    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , ):
        lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Repackage prepare_config_and_inputs() output as (config, inputs_dict).
    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) ,
        ) = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Model-tester suite for the full-sequence Funnel variants.

    NOTE(review): obfuscation damage — the base classes collapsed to the
    undefined ``snake_case__`` (upstream: TFModelTesterMixin,
    PipelineTesterMixin), all class attributes share the name
    ``UpperCamelCase_`` (each assignment overwrites the previous), and all
    test methods share ``__snake_case`` (only the last survives).
    """
    # Upstream: all_model_classes.
    UpperCamelCase_ :Tuple = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Upstream: pipeline_model_mapping.
    UpperCamelCase_ :Optional[int] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ :Dict = False
    UpperCamelCase_ :Tuple = False

    # setUp: build the model tester and config tester.
    def __snake_case ( self : int ):
        lowerCAmelCase__ = TFFunnelModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : str ):
        self.config_tester.run_common_tests()

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Tuple ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@require_tf
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """Model-tester suite for the *base* (pooled) Funnel variants.

    NOTE(review): same obfuscation damage as the class above — undefined
    ``snake_case__`` base, duplicated ``UpperCamelCase_`` attributes and
    ``__snake_case`` method names.  The tester is built with ``base=...``
    so the pooled-output shape checks in the model tester apply.
    """
    UpperCamelCase_ :str = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    UpperCamelCase_ :Optional[Any] = False
    UpperCamelCase_ :Any = False

    # setUp: base=True variant of the model tester.
    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Any ):
        self.config_tester.run_common_tests()

    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
| 668 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Standard Transformers lazy-module boilerplate for the MRA model.
# Bug fix: the obfuscated original bound both the structure dict and the
# torch-only symbol list to the same throwaway name (so the dict was
# overwritten) and then referenced the never-defined ``_import_structure``;
# the final _LazyModule was also bound to a throwaway name instead of
# replacing this module in ``sys.modules``.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only importable when torch is installed.
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 668 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases used throughout this module (AlphaFold/OpenFold conventions;
# NOTE(review): the original alias names were lost to obfuscation — these are
# the upstream names, and nothing in the visible code reads them).
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
# ProteinNet tertiary coordinates are in picometers; PDB uses angstroms.
# Bug fix: this constant is read below (``atom_positions *= PICO_TO_ANGSTROM``)
# but the obfuscated original bound it to a throwaway name.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation (AlphaFold-style parallel arrays).

    Bug fix: the obfuscated original decorated the class with the undefined
    ``frozen=snake_case__`` and collapsed every field onto one name (so the
    dataclass had a single field).  The names below are the ones the rest of
    this module actually reads (``prot.atom_positions`` etc.) and passes by
    keyword when constructing ``Protein``; the class name itself is what the
    sibling functions reference.
    """

    # Cartesian atom coordinates, in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def lowerCAmelCase_ (lowercase__ : str ) -> "Protein":
    """Parse a ProteinNet-format string into a ``Protein``.

    Only the [PRIMARY] (sequence), [TERTIARY] (N/CA/C coordinates, in
    picometers) and [MASK] sections are consumed.

    Bug fix: the obfuscated original assigned every intermediate to a single
    throwaway name, leaving ``seq``/``tertiary``/``atom_positions`` etc.
    undefined; it also tried to assign into an immutable string when mapping
    unknown residues to 'X'.
    """
    tag_re = r'''(\[[A-Z]+\]\n)'''
    tags = [tag.strip() for tag in re.split(tag_re, lowercase__) if len(tag) > 0]
    # Pair each section tag with that section's lines.
    groups = zip(tags[0::2], [l.split('''\n''') for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Map unknown residue symbols to 'X'. Strings are immutable, so
            # rebuild the sequence instead of assigning seq[i] in place.
            seq = "".join(
                res if res in residue_constants.restypes else '''X''' for res in g[1][0].strip()
            )
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                # Every third column belongs to the same backbone atom.
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            # ProteinNet coordinates are in picometers; convert to angstroms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'''-''': 0, '''+''': 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        # NOTE(review): the b_factors argument's source was lost in
        # obfuscation; ProteinNet carries no B-factors, so None is presumed —
        # confirm against the upstream parser.
        b_factors=None,
    )
def lowerCAmelCase_ (prot: "Protein", chain_id: int = 0) -> List[str]:
    """Build the PDB header lines (REMARK / PARENT) for one chain of ``prot``.

    Bug fix: the obfuscated original declared both parameters with the same
    name (a SyntaxError) and collapsed ``remark``/``parents`` onto a single
    throwaway name.

    :param prot: protein whose ``remark``/``parents``/``parents_chain_index``
        attributes are read.
    :param chain_id: when per-parent chain indices are present, only parents
        belonging to this chain are listed.
    :return: list of header lines (no trailing newlines).
    """
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f'REMARK {remark}')
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ['''N/A''']
    pdb_headers.append(f'PARENT {" ".join(parents)}')
    return pdb_headers
def lowerCAmelCase_ (prot: "Protein", pdb_str: str) -> str:
    """Insert REMARK/PARENT header lines into an existing PDB string.

    A ``PARENT`` line is emitted once up front and again after each ``TER``
    record (except the final one before ``END``), so that every chain in a
    multi-chain file carries its own parent list.  Pre-existing PARENT/REMARK
    lines in ``pdb_str`` are dropped.

    Bug fix: the obfuscated original gave both parameters the same name (a
    SyntaxError) and destroyed the ``lines``/``parents_per_chain``/
    ``chain_counter`` bindings.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split('''\n''')
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f'REMARK {remark}')
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index (keys are stringified ints).
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ['''N/A'''])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [['''N/A''']]

    def make_parent_line(p: Sequence[str]) -> str:
        # Format one PARENT record.
        return f'PARENT {" ".join(p)}'

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A new chain starts after this TER: emit its PARENT line.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['''N/A''']
            out_pdb_lines.append(make_parent_line(chain_parents))
    return '''\n'''.join(out_pdb_lines)
def lowerCAmelCase_ (prot: Protein) -> str:
    '''Convert a `Protein` into a PDB-format string (ATOM/TER/END records).

    Fix: the original assigned every local to the same obfuscated name while
    the body read the real names (`restypes`, `aatype`, `atom_index`, ...),
    so it could not run; `np.intaa` is restored to `np.int32` and
    `restype_atoa` to `restype_1to3`. The fixed-column padding of the ATOM
    and TER lines follows the PDB format specification — verify against the
    reference writer.

    Args:
        prot: Protein with `aatype`, `atom_positions`, `atom_mask`,
            `residue_index`, `b_factors` and optional `chain_index` arrays.

    Returns:
        The PDB text, terminated by an END record and a trailing newline.

    Raises:
        ValueError: If any `aatype` value exceeds `restype_num`.
    '''
    restypes = residue_constants.restypes + ['X']

    def res_1to3(r: int) -> str:
        # Map a residue-type index to its 3-letter PDB code ("UNK" fallback).
        return residue_constants.restype_1to3.get(restypes[r], 'UNK')

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = 'ATOM'
            name = atom_name if len(atom_name) == 4 else f' {atom_name}'
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''

            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                f'{res_name_3:>3} {chain_tag:>1}'
                f'{residue_index[i]:>4}{insertion_code:>1}   '
                f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                f'{occupancy:>6.2f}{b_factor:>6.2f}          '
                f'{element:>2}{charge:>2}'
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = 'TER'
            chain_termination_line = (
                f'{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append('END')
    pdb_lines.append('')
    return '\n'.join(pdb_lines)
def lowerCAmelCase_ (prot: Protein) -> np.ndarray:
    '''Return the standard per-residue-type atom mask for each position of `prot`.

    Fix: the original named its parameter `lowercase__` while the body read
    `prot` (NameError); the parameter is renamed to match.

    Args:
        prot: Protein whose `aatype` array indexes the mask table.

    Returns:
        Rows of `residue_constants.STANDARD_ATOM_MASK` selected by residue type.
    '''
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCAmelCase_ (
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    '''Assemble a `Protein` from model input features and model outputs.

    Fix: the original declared all seven parameters with the same name (a
    SyntaxError) while the body read `features`, `result`, `b_factors`, ...;
    the parameters are renamed to match, in the original positional order.

    Args:
        features: Mapping with `aatype` and `residue_index` (0-based; shifted
            to 1-based here).
        result: Mapping with `final_atom_positions` and `final_atom_mask`.
        b_factors: Optional per-atom B-factors; zeros when omitted.
        chain_index: Optional per-residue chain indices.
        remark: Optional REMARK text.
        parents: Optional parent template names.
        parents_chain_index: Optional chain index per parent.

    Returns:
        The populated `Protein`.
    '''
    return Protein(
        aatype=features['aatype'],
        atom_positions=result['final_atom_positions'],
        atom_mask=result['final_atom_mask'],
        residue_index=features['residue_index'] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 668 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowerCAmelCase_ (lowercase__ : str ) -> str:
    """Return the canonical anagram signature: the word's characters in sorted order."""
    ordered_chars = sorted(lowercase__)
    return "".join(ordered_chars)
def lowerCAmelCase_ (lowercase__ : str ) -> list[str]:
    '''Return every word in the table that shares this word's anagram signature.

    NOTE(review): `word_by_signature` and `signature` are not defined under
    these names anywhere visible in this file (the module-level table and the
    signature helper above were renamed by obfuscation) — confirm these
    references actually resolve before relying on this function.
    '''
    return word_by_signature[signature(lowercase__ )]
# Load the word list, group words by their sorted-letter signature, and — when
# run as a script — dump every multi-word anagram group to "anagrams.txt".
# NOTE(review): each assignment below rebinds the same `_UpperCAmelCase` name,
# yet later lines read `data`, `word_list`, `word_by_signature`, `anagram` and
# `all_anagrams` (the original names). As written these lookups cannot
# resolve; confirm and restore consistent names.
_UpperCAmelCase : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
_UpperCAmelCase : Any = sorted({word.strip().lower() for word in data.splitlines()})
_UpperCAmelCase : Dict = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    _UpperCAmelCase : Optional[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 668 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Put the repository's `src` directory ahead of any installed copy on sys.path
# so tests exercise the checkout, not a pip-installed transformers.
# NOTE(review): the computed path is bound to `_UpperCAmelCase` but inserted as
# `git_repo_path`, which is never defined here — these names do not match;
# confirm which was intended.
_UpperCAmelCase : Optional[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCAmelCase_ (config) -> None:
    '''Register the custom pytest markers used by the test suite.

    Fix: the original named its parameter `lowercase__` while the body called
    `config.addinivalue_line` (NameError); the parameter is renamed to match.
    The junk `Union[...]`/`List[...]` annotations are dropped — `typing` is
    not imported in this file.

    Args:
        config: The pytest config object handed to the `pytest_configure` hook.
    '''
    config.addinivalue_line(
        'markers', 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested'
    )
    config.addinivalue_line(
        'markers', 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested'
    )
    config.addinivalue_line('markers', 'is_pipeline_test: mark test to run only when pipelines are tested')
    config.addinivalue_line('markers', 'is_staging_test: mark test to run only in the staging environment')
    config.addinivalue_line('markers', 'accelerate_tests: mark test that require accelerate')
    config.addinivalue_line('markers', 'tool_tests: mark the tool tests that are run on their specific schedule')
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Optional[int]:
    """Register transformers' shared command-line options on pytest's parser."""
    # Deferred import: keeps this conftest importable without immediately
    # pulling in the transformers testing utilities.
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(lowercase__)
def lowerCAmelCase_ (terminalreporter) -> None:
    '''Print the consolidated test reports when `--make-reports` is set.

    Fix: the original named its parameter `lowercase__` while the body read
    `terminalreporter` (NameError); the parameter is renamed to match, and
    the heavy transformers import is deferred into the branch that needs it.

    Args:
        terminalreporter: pytest's terminal reporter plugin; its `config`
            carries the `--make-reports` option value (used as the report id).
    '''
    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        # Only needed when reports were actually requested.
        from transformers.testing_utils import pytest_terminal_summary_main

        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def lowerCAmelCase_ (session, exitstatus) -> None:
    '''Treat pytest exit status 5 ("no tests collected") as success.

    Fix: the original declared the same parameter name twice (a SyntaxError)
    and assigned 0 to a throwaway local, which had no effect; the intended
    effect is to overwrite the session's exit status — confirm against the
    upstream conftest this was copied from.

    Args:
        session: The pytest session object (its `exitstatus` is overwritten).
        exitstatus: The status pytest is about to exit with.
    '''
    if exitstatus == 5:
        # Exit code 5 means no tests ran; report success instead of failure.
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# NOTE(review): the flag and the stock-checker alias are both rebound to the
# same `_UpperCAmelCase` name, yet the checker class below reads
# `IGNORE_RESULT` and `OutputChecker` — confirm and restore the original names.
_UpperCAmelCase : Any = doctest.register_optionflag("IGNORE_RESULT")
_UpperCAmelCase : Dict = doctest.OutputChecker
class lowerCAmelCase_ ( snake_case__ ):
    '''Doctest output checker that honours the custom IGNORE_RESULT flag.

    When an example carries the IGNORE_RESULT option flag, any output is
    accepted; otherwise comparison defers to the stock doctest checker.

    NOTE(review): `snake_case__`, `IGNORE_RESULT` and `OutputChecker` are not
    defined under these names in the visible file (their definitions above
    were rebound to `_UpperCAmelCase`) — confirm these references resolve.
    '''

    def __snake_case (self, want, got, optionflags):
        # Fix: the original declared all three parameters with the same name
        # (a SyntaxError) while the body read `optionflags`; the parameters
        # follow doctest.OutputChecker.check_output's (want, got, optionflags).
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
# Install the customized doctest machinery for the test run.
# NOTE(review): `CustomOutputChecker` is not defined in this file (the class
# above was renamed to `lowerCAmelCase_`), and all three aliases rebind the
# same `_UpperCAmelCase` name — confirm and restore the intended assignments.
_UpperCAmelCase : Union[str, Any] = CustomOutputChecker
_UpperCAmelCase : Dict = HfDoctestModule
_UpperCAmelCase : List[str] = HfDocTestParser
| 668 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_UpperCAmelCase : List[Any] = "src/transformers"
_UpperCAmelCase : int = "docs/source/en/tasks"
def lowerCAmelCase_ (filename: str, start_prompt: str, end_prompt: str):
    '''Extract the text delimited by two prompt lines in a file.

    Fix: the original declared the same parameter name three times (a
    SyntaxError); the parameters are renamed to match the body's usage, and
    the junk `typing` annotations (typing is not imported here) are replaced
    with builtins.

    Args:
        filename: Path of the file to scan.
        start_prompt: Prefix of the line that opens the region.
        end_prompt: Prefix of the line that closes the region.

    Returns:
        `(text, start_index, end_index, lines)`: the region (blank edges
        trimmed), its half-open line range, and the whole file as lines.
    '''
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines at both ends of the region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return ''.join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_UpperCAmelCase : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
_UpperCAmelCase : int = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_UpperCAmelCase : List[str] = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def lowerCAmelCase_ (task_guide: str) -> str:
    '''Build the markdown list of model links for one task guide.

    Fix: the original named its parameter `lowercase__` while the body read
    `task_guide`; the parameter is renamed to match.

    NOTE(review): `TASK_GUIDE_TO_MODELS`, `SPECIAL_TASK_GUIDE_TO_MODEL_TYPES`
    and `transformers_module` are read here but the module-level constants
    above were all rebound to `_UpperCAmelCase` — confirm these names resolve.

    Args:
        task_guide: Task-guide filename, e.g. ``"summarization.md"``.

    Returns:
        A comma-separated markdown list of model links, with a trailing newline.
    '''
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ', '.join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + '\n'
def lowerCAmelCase_ (task_guide: str, overwrite: bool = False):
    '''Check (and optionally rewrite) the auto-generated model list in a task guide.

    Fix: the original declared the same parameter name twice (a SyntaxError)
    while the body read `task_guide` and `overwrite`; the parameters are
    renamed to match.

    NOTE(review): the guide directory constant is referenced here as
    `PATH_TO_TASK_GUIDES` (assigned above but rebound to `_UpperCAmelCase`),
    and `_find_text_in_file` / `get_model_list_for_task` refer to the helper
    functions defined earlier in this file — confirm these names resolve.

    Args:
        task_guide: Task-guide filename to check.
        overwrite: When True, rewrite the file in place instead of raising.

    Raises:
        ValueError: If the list is stale and `overwrite` is False.
    '''
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.'
            )
# Script entry point: verify every task guide's model list, fixing in place
# when --fix_and_overwrite is passed.
# NOTE(review): the parser and parsed args are bound to `_UpperCAmelCase` but
# read back as `parser` / `args`, and `TASK_GUIDE_TO_MODELS` /
# `check_model_list_for_task` were also renamed above — as written these
# lookups cannot resolve; confirm and restore consistent names.
if __name__ == "__main__":
    _UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    _UpperCAmelCase : Optional[Any] = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 668 |
def lowerCAmelCase_ (arr: list) -> list:
    '''Sort `arr` in place (and return it) with odd-even transposition sort.

    Fix: the original bound `len()` to a throwaway name, then iterated
    `range()` over the list itself and swapped into dead locals, so it
    crashed instead of sorting; the names are restored so the algorithm
    works.

    Args:
        arr: List of mutually comparable items; modified in place.

    Returns:
        The same list, sorted in ascending order.
    '''
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between even- and odd-indexed neighbour comparisons.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
# Demo entry point: sort a reversed 10..1 list and print before/after.
# NOTE(review): the list is bound to `_UpperCAmelCase` but printed as `arr`,
# and `odd_even_transposition` is not defined under that name in this file
# (the sort above is named `lowerCAmelCase_`) — confirm and restore the names.
if __name__ == "__main__":
    _UpperCAmelCase : Union[str, Any] = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 668 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
UpperCamelCase_ :Union[str, Any] = XLMRobertaTokenizer
UpperCamelCase_ :List[str] = XLMRobertaTokenizerFast
UpperCamelCase_ :Dict = True
UpperCamelCase_ :Dict = True
    def __snake_case ( self : int ):
        # Build a slow XLM-R tokenizer from the SentencePiece test fixture and
        # save it to `self.tmpdirname` so the other tests can reload it.
        # NOTE(review): both the vocab path and `keep_accents` are passed
        # `SCREAMING_SNAKE_CASE_`, which is undefined in this scope, and the
        # tokenizer is bound to `lowerCAmelCase__` but used as `tokenizer` —
        # confirm the intended values (presumably the fixture path and True).
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase__ = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
        tokenizer.save_pretrained(self.tmpdirname )
    def __snake_case ( self : Union[str, Any] ):
        # Round-trip the "<pad>" token through token<->id conversion; id 1 is
        # expected for the pad token in this fixture vocab.
        # NOTE(review): the converters receive `SCREAMING_SNAKE_CASE_` while
        # the literals are bound to `lowerCAmelCase__` — the names do not line
        # up; confirm the intended values ("<pad>" and 1).
        lowerCAmelCase__ = '''<pad>'''
        lowerCAmelCase__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_002 )
def __snake_case ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_002 )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __snake_case ( self : Optional[int] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCAmelCase__ = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
    @cached_property
    def __snake_case ( self : Dict ):
        # Full pretrained checkpoint tokenizer; cached because loading is slow.
        return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
    def __snake_case ( self : Tuple ):
        # The tokenizer must survive a pickle round trip (needed e.g. by
        # multiprocessing data loaders).
        # NOTE(review): `shutil.copyfile` receives `SCREAMING_SNAKE_CASE_` as
        # the source path and `pickle.dumps`/`loads` receive it as payload —
        # the name is undefined in this scope (presumably the fixture path and
        # the tokenizer / its pickle); confirm.
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SCREAMING_SNAKE_CASE_ , f.name )
            lowerCAmelCase__ = XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = pickle.dumps(SCREAMING_SNAKE_CASE_ )
            pickle.loads(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[int] ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case ( self : str ):
lowerCAmelCase__ = '''Hello World!'''
lowerCAmelCase__ = [0, 35_378, 6_661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
@slow
def __snake_case ( self : Any ):
lowerCAmelCase__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase__ = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
@slow
def __snake_case ( self : int ):
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
| 668 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase_ ( snake_case__ ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=13 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : str=99 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : int=5 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Tuple=37 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Any=16 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : int=None , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
    def __snake_case ( self : Union[str, Any] ):
        # Create random model inputs (token ids, optional attention mask) plus
        # labels for every head, sized from this tester's hyper-parameters.
        # NOTE(review): every assignment below rebinds `lowerCAmelCase__`, yet
        # the return statement reads `config`, `input_ids`, `input_mask`,
        # `sequence_labels`, `token_labels`, `choice_labels` — as written these
        # lookups cannot resolve; confirm and restore consistent names.
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __snake_case ( self : Tuple ):
        # Build a DistilBertConfig mirroring this tester's hyper-parameters.
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase__ = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase__ = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : Optional[int] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    # Common DistilBert model-test suite (ModelTesterMixin-style).
    # NOTE(review): the obfuscation gave every class attribute and method the same
    # name, so later definitions shadow earlier ones — original names need restoring.

    # Model classes exercised by the shared tests (None when torch is unavailable).
    UpperCamelCase_ :Any = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # Pipeline task name -> model class mapping used by the pipeline tests.
    UpperCamelCase_ :Union[str, Any] = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Boolean feature flags consumed by the shared test mixin.
    UpperCamelCase_ :int = True
    UpperCamelCase_ :List[str] = True
    UpperCamelCase_ :List[Any] = True
    UpperCamelCase_ :Dict = True

    def __snake_case ( self : Dict ):
        # Set up the model tester and config tester (dim=37 keeps the test model tiny).
        lowerCAmelCase__ = DistilBertModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )

    def __snake_case ( self : List[Any] ):
        # Run the generic config round-trip checks.
        self.config_tester.run_common_tests()

    def __snake_case ( self : Dict ):
        # Base model forward-pass shape check.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        # Masked-LM head check.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Dict ):
        # Question-answering head check.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] ):
        # Sequence-classification head check.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        # Token-classification head check.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        # Multiple-choice head check.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )

    @slow
    def __snake_case ( self : Tuple ):
        # Smoke-test loading the first published checkpoint.
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )

    @slow
    @require_torch_gpu
    def __snake_case ( self : Any ):
        # Trace each model with TorchScript, save/reload it, and run the loaded module.
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(config=SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            # Trace on CPU so the saved artifact is device-independent.
            lowerCAmelCase__ = torch.jit.trace(
                SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
                lowerCAmelCase__ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
                loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    # Integration test against the published distilbert-base-uncased checkpoint.

    @slow
    def __snake_case ( self : str ):
        # Run a real forward pass and compare a 3x3 slice of the hidden states
        # against golden values recorded from the reference implementation.
        lowerCAmelCase__ = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        lowerCAmelCase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        lowerCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
        # Hidden size of distilbert-base-uncased is 768; 11 input tokens.
        lowerCAmelCase__ = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        # atol=1e-4 tolerates minor numeric drift across torch versions/hardware.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 668 | 1 |
from __future__ import annotations
def lowerCAmelCase_ (lowercase__ : list[int] ) -> bool:
    """Return True when every element of ``lowercase__`` occurs exactly once."""
    seen: set[int] = set()
    for item in lowercase__:
        if item in seen:
            return False
        seen.add(item)
    return True
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 668 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for the observations (Viterbi).

    The obfuscated original repeated one parameter name five times (a
    SyntaxError) and collapsed every ``probabilities[...] = ...`` /
    ``pointers[...] = ...`` write into a dead local; both are restored here.

    Raises:
        ValueError: if any argument fails the structural checks in ``_validation``.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every viterbi() argument, raising ValueError on the first problem.

    Restores distinct parameter names (the obfuscated original duplicated one
    name five times, a SyntaxError).
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any of the five viterbi() arguments is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings (delegates to _validate_list)."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a list whose items are all strings.

    Parameter names restored: the obfuscated signature duplicated one name while
    the body already referenced ``_object`` and ``var_name``.
    """
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables: flat str->float, then two nested dicts."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check a dict of dicts whose inner values must all be floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    """Raise ValueError unless ``_object`` is a dict with str keys and
    ``value_type`` values; ``nested`` only adjusts the error message wording.
    """
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
    # Execute module doctests when run as a script.
    from doctest import testmod

    testmod()
| 668 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the config class's max_position_embeddings property logs through it.
logger = logging.get_logger(__name__)

# Canonical config download locations, keyed by checkpoint name.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for a Transformer-XL model (defaults match transfo-xl-wt103).

    Fixes from the obfuscated original: every ``__init__`` parameter shared one
    name (a SyntaxError), the base class referenced the undefined ``snake_case__``
    instead of the imported ``PretrainedConfig``, the three class attributes all
    shadowed each other, and the ``@max_position_embeddings.setter`` decorator
    referenced a property that was defined under a different name.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # First projection is never tied; the rest follow proj_share_all_but_first.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.')
| 668 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase_(SequenceFeatureExtractor):
    """Feature extractor turning raw mono audio into padded log-mel spectrograms
    ("audio_values") plus a patch-level attention mask ("audio_mask").

    Fixes from the obfuscated original: ``__init__`` and ``__call__`` repeated a
    single parameter name (a SyntaxError), the base class referenced the
    undefined ``snake_case__`` instead of the imported ``SequenceFeatureExtractor``,
    and the fbank helper was defined under a name different from the
    ``self._np_extract_fbank_features`` call site.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm='''slaney''',
            mel_scale='''slaney''',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Compute a dB-scaled log-mel spectrogram normalized into [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, '''hann'''),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='''dB''',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms.

        Returns a BatchFeature with "audio_values" (and "audio_mask" when
        ``return_attention_mask``); raises ValueError on sampling-rate mismatch
        or multi-channel input.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 668 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Tuple = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a RegressionModel, a DDP copy prepared by `accelerator`, and a dataloader.

    Restores distinct parameter names (the obfuscated original duplicated one
    name, a SyntaxError); callers below pass (accelerator, num_samples, batch_size).
    """
    set_seed(42)  # deterministic weights/data across processes
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Return a GLUE/MRPC validation dataloader tokenized with a tiny test BERT.

    `use_longest` switches padding between "longest" and fixed "max_length"=128.
    """
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(examples):
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Map on the main process first so the cache is shared, not rebuilt per rank.
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=1_28, return_tensors='''pt''')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": [...], "no": [...]}, accelerator) for the MRPC comparison test."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run inference over `dataloader` and return (logits, targets) gathered
    across processes with gather_for_metrics (drops duplicated samples).
    """
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    """Assert gather_for_metrics returns exactly `num_samples` predictions
    (i.e. padding duplicates added for uneven shards are dropped).
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare MRPC metrics computed single-process vs. distributed with
    gather_for_metrics; they must agree for both accuracy and F1.
    """
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    """Entry point: run the gather_for_metrics checks across every
    split_batches/dispatch_batches combination, resetting AcceleratorState
    between runs so each configuration starts clean.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_12)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Per-process entry point used by TPU spawners (index = process rank, unused)."""
    main()
if __name__ == "__main__":
    # Run the distributed-metrics checks when launched directly.
    main()
| 668 |
from collections import namedtuple
# Conversion record: `from_` scales a unit into cubic meters, `to` scales
# cubic meters into the unit. The function body below references these exact
# names, which the obfuscated original had rebound to throwaway identifiers.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def liquid_volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a liquid volume between the units listed in METRIC_CONVERSION.

    Raises:
        ValueError: if either unit name is not a METRIC_CONVERSION key.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
            + ''', '''.join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + ''', '''.join(METRIC_CONVERSION))
    # unit -> cubic meters -> target unit
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
    # Execute module doctests when run as a script.
    import doctest

    doctest.testmod()
| 668 | 1 |
def stooge_sort(arr: list) -> list:
    """Sort `arr` in place with stooge sort and return it.

    Name restored to match the `stooge(...)` call below and the script's
    `stooge_sort(...)` caller; the obfuscated version also lost the element
    swap, reducing the sort to a no-op.
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    """Recursively stooge-sort arr[i..h] (inclusive)."""
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
if __name__ == "__main__":
    # Interactive demo: read a comma-separated list and print it sorted.
    # NOTE(review): binding names look obfuscated — the second line reads
    # `user_input` and the print reads `unsorted`; confirm the original bindings.
    _UpperCAmelCase : int = input("Enter numbers separated by a comma:\n").strip()
    _UpperCAmelCase : str = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 668 |
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place using insertion sort with a binary search
    for the insertion point, and return it.

    The obfuscated original turned every `collection[...] = ...` write into a
    dead local assignment, so it never actually sorted; name restored to match
    the script's `binary_insertion_sort(...)` caller.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the sorted prefix collection[0:i] for val's slot.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    # Interactive demo: read a comma-separated list and print it sorted.
    # NOTE(review): binding names look obfuscated — the comprehension reads
    # `user_input` and the print reads `unsorted`; confirm the original bindings.
    _UpperCAmelCase : Tuple = input("Enter numbers separated by a comma:\n").strip()
    _UpperCAmelCase : Tuple = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 668 | 1 |
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Launch a transformers example on remote hardware via Runhouse.
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    _UpperCAmelCase : List[str] = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    # Unrecognized args are forwarded verbatim to the example script below.
    _UpperCAmelCase , _UpperCAmelCase : List[str] = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        _UpperCAmelCase : Dict = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        _UpperCAmelCase : str = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    # Directory of the chosen example, used to install its requirements file.
    _UpperCAmelCase : List[Any] = args.example.rsplit("/", 1)[0]
    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
    #                  stream_logs=True)
| 668 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True if ``input_string`` fully matches ``pattern``.

    The pattern supports two regex metacharacters:
    ``.`` matches any single character and ``*`` matches zero or more
    repetitions of the preceding element.

    Bottom-up dynamic programming: dp[i][j] is 1 iff the first ``i``
    characters of ``input_string`` match the first ``j`` characters of
    ``pattern``.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively (e.g. "a*", "a*b*")
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                # characters agree: inherit the diagonal state
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' consumes zero occurrences of the preceding element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' absorbs one more occurrence of the repeated element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 668 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DistilBERT family: maps submodule name to the
# list of public names it exports, so heavy backends are only imported on use.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Each optional backend contributes its submodule only when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see direct imports; at runtime the lazy module is used.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
# Names the tokenizer class below reads via its `vocab_files_names` /
# `pretrained_vocab_files_map` / `max_model_input_sizes` class attributes.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class lowerCAmelCase_(PreTrainedTokenizer):
    """Character-level tokenizer backed by a JSON vocab file (one token per
    character); vocab URLs indicate this is the MGP-STR tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        """Load the token->id vocabulary from ``vocab_file`` and build the
        reverse (id->token) decoder mapping."""
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text: str):
        """Split ``text`` into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token: str):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        """Write the vocabulary as JSON into ``save_directory`` and return the
        path, mirroring the `PreTrainedTokenizer.save_vocabulary` contract."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 668 | 1 |
def solution(limit: int = 1_00_00_00) -> int:
    """Project Euler 135: count how many n < ``limit`` admit exactly ten
    solutions of x^2 - y^2 - z^2 = n with x, y, z a positive decreasing
    arithmetic progression.

    Writing the progression as (a + d, a, a - d) gives n = a * (4d - a), so
    every divisor ``first_term`` of ``n`` is tried and the implied common
    difference is checked for integrality and positivity.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        # n runs over all multiples of first_term, so first_term divides n
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisble by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
    # Uses the f-string '=' debug specifier: prints "solution() = <count>".
    print(F'''{solution() = }''')
| 668 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the DistilBERT family: maps submodule name to the
# list of public names it exports, so heavy backends are only imported on use.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Each optional backend contributes its submodule only when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see direct imports; at runtime the lazy module is used.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 1 |
from __future__ import annotations
def lowerCAmelCase_(number_of_bytes: int, partitions: int) -> list[str]:
    """Split ``number_of_bytes`` into ``partitions`` contiguous, 1-indexed
    byte ranges, returned as "start-end" strings.

    The last partition absorbs any remainder so the ranges exactly cover
    [1, number_of_bytes].

    Raises:
        ValueError: if ``partitions`` is not positive, or exceeds
            ``number_of_bytes``.
    """
    if partitions <= 0:
        raise ValueError('''partitions must be a positive number!''')
    if partitions > number_of_bytes:
        raise ValueError('''partitions can not > number_of_bytes!''')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # last partition runs to the very end; earlier ones are equal-sized
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 668 |
from collections import deque
class Process:
    """A process tracked by the MLFQ scheduler: arrival, remaining burst
    time, accumulated waiting time and turnaround time."""

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue scheduler.

    The first ``number_of_queues - 1`` levels run round-robin with the
    corresponding entry of ``time_slices``; the final level runs
    first-come-first-served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: "list[int]",
        queue: "deque[Process]",
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> "list[str]":
        """Return the names of finished processes in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: "list[Process]") -> "list[int]":
        """Return the accumulated waiting time of each process in ``queue``."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: "list[Process]") -> "list[int]":
        """Return the turnaround time of each process in ``queue``."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: "list[Process]") -> "list[int]":
        """Return the completion (stop) time of each process in ``queue``."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: "deque[Process]") -> "list[int]":
        """Return the remaining burst time of each process in ``queue``."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: "Process") -> int:
        """Add the time since the process was last interrupted to its
        accumulated waiting time."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: "deque[Process]") -> "deque[Process]":
        """Run every process in ``ready_queue`` to completion, FCFS order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: "deque[Process]", time_slice: int):
        """Give each queued process one ``time_slice`` of CPU; unfinished
        processes return to the tail of ``ready_queue``.

        Returns (finished processes, remaining ready queue).
        """
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> "deque[Process]":
        """Run the full MLFQ schedule and return the finish queue."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
    import doctest

    # Demo workload: four processes arriving at t=0.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    # MLFQ needs exactly one time slice per round-robin level.
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # Recreate the processes: the doctest run above may have mutated them.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 668 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
# Hub config URLs for the pretrained XLNet checkpoints.
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration class for XLNet models (stores architecture
    hyper-parameters and token ids)."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct the configuration; ``d_head`` is derived as
        ``d_model // n_head`` and the deprecated ``use_cache`` kwarg is
        mapped onto ``use_mems_eval``."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        # per-head dimension must be exact
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # XLNet uses relative attention and has no fixed sequence-length limit.
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 668 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Tuple = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (reference model, prepared DDP model, prepared dataloader)
    for a small synthetic regression problem."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    # only the DDP copy and the dataloader go through accelerator.prepare
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: "Accelerator", use_longest=False):
    """Build a tokenized GLUE/MRPC validation dataloader (batch size 16).

    ``use_longest`` switches padding between "longest" and fixed
    "max_length" (128).
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        # Tokenize the sentence pairs without truncating to a fixed length here.
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": ..., "no": ...} model/dataloader/device triples,
    accelerator) for the MRPC gather_for_metrics test."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run ``model`` over ``dataloader`` and return the gathered
    (logits, targets) as concatenated tensors."""
    logits_and_targets = []
    for batch in dataloader:
        # each batch is a mapping with exactly (input, target) values
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        # gather across processes (and truncate duplicated samples)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check gather_for_metrics returns exactly ``num_samples`` predictions
    (i.e. duplicated samples from uneven sharding are dropped)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare GLUE/MRPC metrics from a plain single-device run against a
    distributed run using gather_for_metrics; they must agree."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    """Exercise gather_for_metrics across all dispatch/split-batch
    configurations, then the perfectly-divisible last-batch case."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """XLA multiprocessing entry point; ``index`` is the process index
    (unused)."""
    main()
if __name__ == "__main__":
    # Entry point when launched directly (e.g. via `accelerate launch`).
    main()
| 668 | 1 |
import requests
# Base endpoint for the BBC News source on newsapi.org; the caller's API key
# is appended to this URL.
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Fetch the current top BBC News articles and print their titles,
    numbered from 1."""
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
    # Replace the placeholder with a (free) API key from https://newsapi.org.
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 668 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# Vocabulary/merge filenames and checkpoint maps read by the CTRL tokenizer
# class below via its vocab_files_names / pretrained_vocab_files_map /
# max_model_input_sizes / control_codes class attributes.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL conditioning control codes mapped to their vocabulary ids.
CONTROL_CODES = {
    "Pregnancy": 168_629,
    "Christianity": 7_675,
    "Explain": 106_423,
    "Fitness": 63_440,
    "Saving": 63_163,
    "Ask": 27_171,
    "Ass": 95_985,
    "Joke": 163_509,
    "Questions": 45_622,
    "Thoughts": 49_605,
    "Retail": 52_342,
    "Feminism": 164_338,
    "Writing": 11_992,
    "Atheism": 192_263,
    "Netflix": 48_616,
    "Computing": 39_639,
    "Opinion": 43_213,
    "Alone": 44_967,
    "Funny": 58_917,
    "Gaming": 40_358,
    "Human": 4_088,
    "India": 1_331,
    "Joker": 77_138,
    "Diet": 36_206,
    "Legal": 11_859,
    "Norman": 4_939,
    "Tip": 72_689,
    "Weight": 52_343,
    "Movies": 46_273,
    "Running": 23_425,
    "Science": 2_090,
    "Horror": 37_793,
    "Confession": 60_572,
    "Finance": 12_250,
    "Politics": 16_360,
    "Scary": 191_985,
    "Support": 12_654,
    "Technologies": 32_516,
    "Teenage": 66_160,
    "Event": 32_769,
    "Learned": 67_460,
    "Notion": 182_770,
    "Wikipedia": 37_583,
    "Books": 6_665,
    "Extract": 76_050,
    "Confessions": 102_701,
    "Conspiracy": 75_932,
    "Links": 63_674,
    "Narcissus": 150_425,
    "Relationship": 54_766,
    "Relationships": 134_796,
    "Reviews": 41_671,
    "News": 4_256,
    "Translation": 26_820,
    "multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs occurring in *word*.

    Args:
        word: a sequence of symbols (e.g. a tuple of variable-length strings
            representing a word during BPE merging).

    Returns:
        set[tuple]: every ``(previous_symbol, symbol)`` pair of neighbours.
    """
    # Fix for the mangled original: every assignment target had been rewritten
    # to a throwaway local while the body read `word`/`pairs`/`prev_char`, and
    # the def name did not match the `get_pairs(...)` call sites below.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCAmelCase_ ( snake_case__ ):
    """Byte-pair-encoding tokenizer working from a JSON vocab and a merges file.

    NOTE(review): this block is machine-mangled — every method shares the name
    `__snake_case` (so each later definition overwrites the earlier ones), the
    `__init__` parameters all share one name (a SyntaxError), and most
    assignment targets were rewritten to `lowerCAmelCase__`, shadowing the
    identifiers the following lines actually read (`word`, `pairs`,
    `self.encoder`, `index`, ...). Restore the original identifiers before
    relying on this class.
    """

    UpperCamelCase_ :int = VOCAB_FILES_NAMES
    UpperCamelCase_ :str = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ :Optional[int] = CONTROL_CODES

    def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , **SCREAMING_SNAKE_CASE_ : Tuple ):
        # NOTE(review): presumably (vocab_file, merges_file, unk_token) given the
        # reads below — confirm against the unmangled source.
        super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        # Load token -> id mapping from the JSON vocab file.
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
            lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
        # Inverse mapping id -> token.
        lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
        # Parse the merges file: first line is a version header, last is empty.
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
            lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1]
        lowerCAmelCase__ = [tuple(merge.split() ) for merge in merges]
        # Merge priority: earlier merges rank lower (higher priority).
        lowerCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        lowerCAmelCase__ = {}

    @property
    def __snake_case ( self : List[str] ):
        # Vocabulary size (without added tokens).
        return len(self.encoder )

    def __snake_case ( self : Union[str, Any] ):
        # Full vocabulary including tokens added after loading.
        return dict(self.encoder , **self.added_tokens_encoder )

    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Any ):
        # Apply BPE merges to a single whitespace token, memoised in self.cache.
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
        # Tag the final symbol with "</w>" so merges can distinguish word-final units.
        lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (highest-priority) bigram first.
            lowerCAmelCase__ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase__ , lowerCAmelCase__ = bigram
            lowerCAmelCase__ = []
            lowerCAmelCase__ = 0
            while i < len(SCREAMING_SNAKE_CASE_ ):
                try:
                    lowerCAmelCase__ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase__ = j
                # Merge the matched pair into a single symbol; otherwise keep one symbol.
                if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = new_word
            if len(SCREAMING_SNAKE_CASE_ ) == 1:
                break
            else:
                lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
        # Sub-word boundaries are marked with a trailing "@@ "; strip the marker
        # (4 chars) that would otherwise follow the "</w>"-bearing last unit.
        lowerCAmelCase__ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = word[:-4]
        lowerCAmelCase__ = word
        return word

    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # Tokenize text: split on whitespace, then BPE each chunk.
        lowerCAmelCase__ = []
        lowerCAmelCase__ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
        for token in words:
            split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
        return split_tokens

    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any ):
        # token -> id, falling back to the unk token's id.
        return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )

    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
        # id -> token, falling back to the unk token.
        return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )

    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str ):
        # Join tokens back into a string, dropping the "@@ " continuation markers.
        lowerCAmelCase__ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
        return out_string

    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
        # Write vocab.json and merges.txt into save_directory (optionally prefixed).
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCAmelCase__ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase__ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        lowerCAmelCase__ = 0
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # Merges must be written in rank order; warn if ranks are not contiguous.
            # NOTE(review): the lambda body reads `kv[1]` but its parameter was
            # mangled to `SCREAMING_SNAKE_CASE_` — another symptom of the mangling.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCAmelCase__ = token_index
                writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 668 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
# Map of pretrained FNet checkpoints to their hosted config files.
_UpperCAmelCase : Union[str, Any] = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}
class lowerCAmelCase_ ( snake_case__ ):
    """Configuration class for FNet models.

    Stores the hyper-parameters that define an FNet architecture; every
    argument is persisted as an instance attribute so the base configuration
    class can serialize it.
    """

    # Keep the (mangled) class-attribute name so external lookups still resolve;
    # upstream this is `model_type = "fnet"`.
    UpperCamelCase_ :Optional[int] = 'fnet'

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        # Fix for the mangled original: all parameters shared one name (a
        # SyntaxError) and every hyper-parameter was assigned to a throwaway
        # local instead of `self`, so the config stored nothing. Parameter
        # names/defaults restored from the canonical FNet configuration.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 668 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_ :
    """Abstract base class for generation streamers.

    NOTE(review): both methods below carry the same mangled name
    `__snake_case`, so the second definition overwrites the first; upstream
    they are presumably two distinct hooks (one taking a value, one taking no
    argument) — confirm against the unmangled source.
    """

    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : int ):
        # Subclasses must override.
        raise NotImplementedError()

    def __snake_case ( self : Union[str, Any] ):
        # Subclasses must override.
        raise NotImplementedError()
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer that decodes incoming token ids and prints text incrementally.

    NOTE(review): machine-mangled block — `__init__` has duplicate
    `SCREAMING_SNAKE_CASE_` parameter names (a SyntaxError) and assignment
    targets were rewritten to `lowerCAmelCase__` where the following lines
    clearly read `self.tokenizer`, `self.skip_prompt`, `text`, etc. Restore
    the original identifiers before use.
    """

    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : "AutoTokenizer" , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : List[Any] ):
        lowerCAmelCase__ = tokenizer
        lowerCAmelCase__ = skip_prompt
        lowerCAmelCase__ = decode_kwargs
        # variables used in the streaming process
        lowerCAmelCase__ = []
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = True

    def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
        # Receive a new tensor of token ids; only batch size 1 is supported.
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape ) > 1:
            lowerCAmelCase__ = value[0]
        # The first chunk is the prompt; optionally suppress it.
        if self.skip_prompt and self.next_tokens_are_prompt:
            lowerCAmelCase__ = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        lowerCAmelCase__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            lowerCAmelCase__ = text[self.print_len :]
            lowerCAmelCase__ = []
            lowerCAmelCase__ = 0
        # If the last token is a CJK character, we print the characters.
        elif len(SCREAMING_SNAKE_CASE_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            lowerCAmelCase__ = text[self.print_len :]
            self.print_len += len(SCREAMING_SNAKE_CASE_ )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            lowerCAmelCase__ = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(SCREAMING_SNAKE_CASE_ )
        self.on_finalized_text(SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[Any] ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            lowerCAmelCase__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            lowerCAmelCase__ = text[self.print_len :]
            lowerCAmelCase__ = []
            lowerCAmelCase__ = 0
        else:
            lowerCAmelCase__ = ''''''
        lowerCAmelCase__ = True
        self.on_finalized_text(SCREAMING_SNAKE_CASE_ , stream_end=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
        # Print without a trailing newline until the stream ends.
        print(SCREAMING_SNAKE_CASE_ , flush=SCREAMING_SNAKE_CASE_ , end='''''' if not stream_end else None )

    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4e00 and cp <= 0x9fff)
            or (cp >= 0x3400 and cp <= 0x4dbf)  #
            or (cp >= 0x2_0000 and cp <= 0x2_a6df)  #
            or (cp >= 0x2_a700 and cp <= 0x2_b73f)  #
            or (cp >= 0x2_b740 and cp <= 0x2_b81f)  #
            or (cp >= 0x2_b820 and cp <= 0x2_ceaf)  #
            or (cp >= 0xf900 and cp <= 0xfaff)
            or (cp >= 0x2_f800 and cp <= 0x2_fa1f)  #
        ):  #
            return True
        return False
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer variant that puts finalized text on a queue for iteration.

    NOTE(review): same mangling as the class above — duplicate parameter
    names in `__init__` (a SyntaxError) and `lowerCAmelCase__` assignment
    targets where later lines read `self.text_queue`, `self.stop_signal`,
    `self.timeout`.
    """

    def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : "AutoTokenizer" , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[float] = None , **SCREAMING_SNAKE_CASE_ : List[str] ):
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = Queue()
        lowerCAmelCase__ = None
        lowerCAmelCase__ = timeout

    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
        # Enqueue finalized text; a sentinel (stop_signal) marks the end of stream.
        self.text_queue.put(SCREAMING_SNAKE_CASE_ , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__( self : Optional[int] ):
        return self

    def __snake_case ( self : int ):
        # Iterator step (presumably `__next__` upstream — the mangled name breaks
        # the protocol): block on the queue until text or the stop sentinel arrives.
        lowerCAmelCase__ = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 668 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time pointing users to the new import path.
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| 668 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: heavy submodules are only imported on first attribute access.
_UpperCAmelCase : Union[str, Any] = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only registered when torch is installed.
    _UpperCAmelCase : List[Any] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is read here, but the structure dict above
    # was bound to the mangled name `_UpperCAmelCase` (and is overwritten by the
    # later list) — confirm against the unmangled upstream `__init__.py`.
    _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 668 | 1 |
def euclidean_gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b*.

    Iterative Euclidean algorithm. Restored from the mangled original whose
    parameters shared one name (a SyntaxError) while the body read `a`/`b`,
    and whose def name did not match the `euclidean_gcd(...)` call sites below.
    """
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b*, recursively.

    Restored from the mangled original: duplicate parameter names (a
    SyntaxError), a def name that did not match its own recursive call, and a
    mangled first argument in the recursion (canonically ``(b, a % b)``).
    """
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    """Print sample GCD computations for both implementations.

    Restored name `main` — the `__main__` guard below already calls `main()`,
    while the mangled original named this function `lowerCAmelCase_`.
    """
    print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}')
    print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}')
    print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}')
    print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}')
    print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}')
    print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}')
    print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}')
    print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}')
    print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}')
    print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}')


if __name__ == "__main__":
    main()
| 668 |
from __future__ import annotations
def lowerCAmelCase_(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in decreasing value/weight ratio; the first item that does
    not fit entirely is taken fractionally and the loop stops.

    Args:
        value: profit of each item.
        weight: weight of each item (parallel to ``value``).
        capacity: total carrying capacity.

    Returns:
        ``(max_value, fractions)``: the best achievable total value and, per
        item, the fraction taken (0, 1, or a value in between).
    """
    # Fixes for the mangled original: parameters shared one name (a SyntaxError),
    # the sort key's lambda parameter did not match its body, and `reverse=True`
    # had been replaced by a mangled identifier.
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy order: best value-per-weight first.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Item does not fit entirely: take the possible fraction and stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: config/tokenizer are always available; torch and TF
# modeling classes are only registered when their framework is installed.
_UpperCAmelCase : int = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : int = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Union[str, Any] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is read here, but the structure dict above
    # was bound to the mangled name `_UpperCAmelCase` (and overwritten twice) —
    # confirm against the unmangled upstream `__init__.py`.
    _UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions: *dataset* matches the canonical 4-row, 3-column fixture.

    Restored name — the tests below already call `_check_parquet_dataset`,
    while the mangled original named this helper `lowerCAmelCase_` and gave
    both parameters the same name (a SyntaxError).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a parquet file honours keep_in_memory (checked via Arrow memory tracking).

    Fixture parameter names restored — pytest resolves fixtures by argument
    name, and the mangled original gave all parameters one duplicate name.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit `features` mapping overrides the dtypes inferred from the file."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    # Build a datasets.Features object from the requested dtype mapping (or pass None).
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split is reported on the loaded dataset (defaults to 'train')."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # Precedence fix: the original `assert a == b if b else "train"` parsed as
    # `assert (a == b) if b else "train"`, making the None case a no-op assert.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader accepts either a single path or a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions: every requested split of *dataset_dict* matches the fixture.

    Restored name — the tests below already call `_check_parquet_datasetdict`.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a {split: path} mapping honours keep_in_memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """An explicit `features` mapping overrides inferred dtypes for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Each split in the path mapping is loaded and tagged with its split name."""
    if split:
        path = {split: parquet_path}
    else:
        # No explicit split: load both canonical splits from the same fixture file.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a dataset to parquet round-trips its Arrow table exactly."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Complex feature types (Image) survive a parquet write/read round trip."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    # Also check the streaming (IterableDataset) path preserves features.
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Row-group size is reduced for media features; default (None) otherwise."""
    assert get_writer_batch_size(feature) == expected
| 668 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
# Map of pretrained Data2VecAudio checkpoints to their hosted config files.
_UpperCAmelCase : Union[str, Any] = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( snake_case__ ):
    """Configuration for Data2VecAudio models.

    NOTE(review): machine-mangled block — `__init__` has dozens of duplicate
    `SCREAMING_SNAKE_CASE_` parameter names (a SyntaxError) and assigns every
    hyper-parameter to the throwaway local `lowerCAmelCase__`, while later
    lines clearly read `self.conv_dim`, `self.conv_stride`, `self.conv_kernel`,
    etc. Restore the original parameter names and `self.<attr>` targets
    before use.
    """

    # Upstream this is presumably `model_type = "data2vec-audio"`.
    UpperCamelCase_ :Union[str, Any] = 'data2vec-audio'

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : Any=768 , SCREAMING_SNAKE_CASE_ : List[Any]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : int=3_072 , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.0 , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Dict=1e-5 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE_ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=16 , SCREAMING_SNAKE_CASE_ : Tuple=19 , SCREAMING_SNAKE_CASE_ : str=5 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.05 , SCREAMING_SNAKE_CASE_ : Optional[Any]=10 , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : Dict=0.0 , SCREAMING_SNAKE_CASE_ : int=10 , SCREAMING_SNAKE_CASE_ : Tuple=0 , SCREAMING_SNAKE_CASE_ : Dict="sum" , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=256 , SCREAMING_SNAKE_CASE_ : Dict=(512, 512, 512, 512, 1_500) , SCREAMING_SNAKE_CASE_ : Any=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : Tuple=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE_ : Optional[int]=512 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , SCREAMING_SNAKE_CASE_ : List[str]=1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Any=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
        # Transformer encoder + convolutional feature-encoder hyper-parameters.
        lowerCAmelCase__ = hidden_size
        lowerCAmelCase__ = feat_extract_activation
        lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = conv_bias
        lowerCAmelCase__ = num_conv_pos_embeddings
        lowerCAmelCase__ = num_conv_pos_embedding_groups
        lowerCAmelCase__ = conv_pos_kernel_size
        lowerCAmelCase__ = len(self.conv_dim )
        lowerCAmelCase__ = num_hidden_layers
        lowerCAmelCase__ = intermediate_size
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = num_attention_heads
        lowerCAmelCase__ = hidden_dropout
        lowerCAmelCase__ = attention_dropout
        lowerCAmelCase__ = activation_dropout
        lowerCAmelCase__ = feat_proj_dropout
        lowerCAmelCase__ = final_dropout
        lowerCAmelCase__ = layerdrop
        lowerCAmelCase__ = layer_norm_eps
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = use_weighted_layer_sum
        # The three conv-layer lists must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowerCAmelCase__ = mask_time_prob
        lowerCAmelCase__ = mask_time_length
        lowerCAmelCase__ = mask_time_min_masks
        lowerCAmelCase__ = mask_feature_prob
        lowerCAmelCase__ = mask_feature_length
        lowerCAmelCase__ = mask_feature_min_masks
        # ctc loss
        lowerCAmelCase__ = ctc_loss_reduction
        lowerCAmelCase__ = ctc_zero_infinity
        # adapter
        lowerCAmelCase__ = add_adapter
        lowerCAmelCase__ = adapter_kernel_size
        lowerCAmelCase__ = adapter_stride
        lowerCAmelCase__ = num_adapter_layers
        lowerCAmelCase__ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        lowerCAmelCase__ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = xvector_output_dim

    @property
    def __snake_case ( self : Union[str, Any] ):
        # Product of the conv strides: overall downsampling factor of the feature encoder.
        return math.prod(self.conv_stride )
| 668 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
# NOTE(review): the constants below all share the mangled name `_UpperCAmelCase`,
# so each assignment overwrites the previous one; the class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# which are presumably these values — confirm against upstream.
_UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
# Remote location of the pretrained SentencePiece model.
_UpperCAmelCase : List[Any] = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
# Maximum input length (in tokens) for the pretrained checkpoint.
_UpperCAmelCase : Union[str, Any] = {
    "camembert-base": 512,
}
# SentencePiece's word-boundary underline symbol.
_UpperCAmelCase : Dict = "▁"
class lowerCAmelCase_ ( snake_case__ ):
    """SentencePiece (BPE) tokenizer with fairseq-style special-token index offsets.

    NOTE(review): throughout this class the left-hand side of many assignments
    has been rewritten to the placeholder ``lowerCAmelCase__`` while later
    statements still reference the original names (e.g. ``mask_token``,
    ``out_string``, ``cls``/``sep``, ``state``); as written those names are
    unbound at runtime. The ``__init__`` signature also repeats the parameter
    name ``SCREAMING_SNAKE_CASE_``. These look like obfuscation artefacts —
    confirm behaviour against the upstream source before relying on it.
    """

    # Class-level configuration consumed by the PreTrainedTokenizer machinery.
    UpperCamelCase_ :int = VOCAB_FILES_NAMES
    UpperCamelCase_ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ :Dict = ['input_ids', 'attention_mask']

    def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE_ : str="<mask>" , SCREAMING_SNAKE_CASE_ : int=["<s>NOTUSED", "</s>NOTUSED"] , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : str , ):
        """Load the SentencePiece model and register fairseq-compatible special tokens."""
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
        lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
        lowerCAmelCase__ = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        lowerCAmelCase__ = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        lowerCAmelCase__ = len(self.fairseq_tokens_to_ids )
        lowerCAmelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
        """Add ``cls``/``sep`` special-token ids around one or two id sequences."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase__ = [self.cls_token_id]
        lowerCAmelCase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ):
        """Return a mask with 1 at special-token positions and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
        if token_ids_a is None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]

    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
        """Return an all-zero token-type-id list sized to the full input with specials."""
        lowerCAmelCase__ = [self.sep_token_id]
        lowerCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def __snake_case ( self : List[Any] ):
        """Vocabulary size: fairseq special tokens plus SentencePiece pieces."""
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )

    def __snake_case ( self : int ):
        """Return the full token -> id vocabulary, including added tokens."""
        lowerCAmelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : str ):
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
        """Convert a token to an id, honouring the fairseq special-token table and offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict ):
        """Convert an id back to a token, honouring the fairseq special-token table and offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        """Join tokens back into a string, decoding non-special runs with SentencePiece."""
        lowerCAmelCase__ = []
        lowerCAmelCase__ = ''''''
        lowerCAmelCase__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
                lowerCAmelCase__ = True
                lowerCAmelCase__ = []
            else:
                current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
                lowerCAmelCase__ = False
        out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
        return out_string.strip()

    def __getstate__( self : Optional[Any] ):
        """Pickle support: copy ``__dict__`` with one entry nulled out (presumably
        the unpicklable SentencePiece processor — confirm upstream)."""
        lowerCAmelCase__ = self.__dict__.copy()
        lowerCAmelCase__ = None
        return state

    def __setstate__( self : str , SCREAMING_SNAKE_CASE_ : List[Any] ):
        """Unpickle support: restore state and re-load the SentencePiece model from ``vocab_file``."""
        lowerCAmelCase__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowerCAmelCase__ = {}
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
        """Save the SentencePiece model into ``save_directory``; returns a 1-tuple with its path."""
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCAmelCase__ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
        elif not os.path.isfile(self.vocab_file ):
            with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
                # Serialize the in-memory SentencePiece model when the original file is gone.
                lowerCAmelCase__ = self.sp_model.serialized_model_proto()
                fi.write(SCREAMING_SNAKE_CASE_ )
        return (out_vocab_file,)
| 668 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_UpperCAmelCase : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( datasets.BuilderConfig ):
    """BuilderConfig for the JSON / JSON-lines loader.

    NOTE(review): all fields are bound to the same placeholder name
    ``UpperCamelCase_`` (obfuscation artefact) — confirm field names upstream.
    """

    # Explicit feature schema to cast parsed tables to (None = infer).
    UpperCamelCase_ :Optional[datasets.Features] = None
    # Text encoding used when reading the raw files.
    UpperCamelCase_ :str = "utf-8"
    # Error-handling policy for decoding (None = "strict").
    UpperCamelCase_ :Optional[str] = None
    # When the file is one JSON document, name of the field holding the rows.
    UpperCamelCase_ :Optional[str] = None
    UpperCamelCase_ :bool = True  # deprecated
    UpperCamelCase_ :Optional[int] = None  # deprecated
    # Read granularity in bytes when streaming JSON-lines files.
    UpperCamelCase_ :int = 10 << 20  # 10MB
    # No longer supported; the builder raises ValueError when this is set.
    UpperCamelCase_ :Optional[bool] = None
class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that parses JSON / JSON-lines files into pyarrow tables.

    NOTE(review): as elsewhere in this file, many assignment targets have been
    rewritten to the placeholder ``lowerCAmelCase__`` while later code still
    references the original names (``dataset``, ``batch``, ``block_size``,
    ``splits``, ...); as written those names are unbound — confirm behaviour
    against the upstream `datasets` JSON builder.
    """

    UpperCamelCase_ :List[Any] = JsonConfig

    def __snake_case ( self : List[str] ):
        """Warn on / reject deprecated config knobs and return the DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            lowerCAmelCase__ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )

    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Dict ):
        """Download/extract the configured data files and build one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        lowerCAmelCase__ = dl_manager.download_and_extract(self.config.data_files )
        # A bare str/list/tuple of files means a single (train) split.
        if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ):
            lowerCAmelCase__ = data_files
            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                lowerCAmelCase__ = [files]
            lowerCAmelCase__ = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        # Otherwise data_files maps split name -> file list.
        lowerCAmelCase__ = []
        for split_name, files in data_files.items():
            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                lowerCAmelCase__ = [files]
            lowerCAmelCase__ = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'''files''': files} ) )
        return splits

    def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : pa.Table ):
        """Cast a parsed table to the configured feature schema, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                lowerCAmelCase__ = self.config.features.arrow_schema.field(SCREAMING_SNAKE_CASE_ ).type
                lowerCAmelCase__ = pa_table.append_column(SCREAMING_SNAKE_CASE_ , pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=SCREAMING_SNAKE_CASE_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowerCAmelCase__ = table_cast(SCREAMING_SNAKE_CASE_ , self.config.features.arrow_schema )
        return pa_table

    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
        """Yield ``(key, pa.Table)`` pairs parsed from each input file.

        Two paths: a single JSON document whose ``config.field`` holds the rows,
        or chunked JSON-lines parsing with an adaptively doubled block size.
        """
        for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(SCREAMING_SNAKE_CASE_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
                # We keep only the field we are interested in
                lowerCAmelCase__ = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
                    lowerCAmelCase__ = set().union(*[row.keys() for row in dataset] )
                    lowerCAmelCase__ = {col: [row.get(SCREAMING_SNAKE_CASE_ ) for row in dataset] for col in keys}
                else:
                    lowerCAmelCase__ = dataset
                lowerCAmelCase__ = pa.Table.from_pydict(SCREAMING_SNAKE_CASE_ )
                yield file_idx, self._cast_table(SCREAMING_SNAKE_CASE_ )
            # If the file has one json object per line
            else:
                with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
                    lowerCAmelCase__ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowerCAmelCase__ = max(self.config.chunksize // 32 , 16 << 10 )
                    lowerCAmelCase__ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        lowerCAmelCase__ = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(SCREAMING_SNAKE_CASE_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowerCAmelCase__ = batch.decode(self.config.encoding , errors=SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    lowerCAmelCase__ = paj.read_json(
                                        io.BytesIO(SCREAMING_SNAKE_CASE_ ) , read_options=paj.ReadOptions(block_size=SCREAMING_SNAKE_CASE_ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(SCREAMING_SNAKE_CASE_ , pa.ArrowInvalid )
                                        and "straddling" not in str(SCREAMING_SNAKE_CASE_ )
                                        or block_size > len(SCREAMING_SNAKE_CASE_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'Batch of {len(SCREAMING_SNAKE_CASE_ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Last resort: try to parse the whole file as a single JSON list.
                            try:
                                with open(
                                    SCREAMING_SNAKE_CASE_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
                            except json.JSONDecodeError:
                                logger.error(f'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):  # list is the only sequence type supported in JSON
                                try:
                                    lowerCAmelCase__ = set().union(*[row.keys() for row in dataset] )
                                    lowerCAmelCase__ = {col: [row.get(SCREAMING_SNAKE_CASE_ ) for row in dataset] for col in keys}
                                    lowerCAmelCase__ = pa.Table.from_pydict(SCREAMING_SNAKE_CASE_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}' )
                                    raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
                                yield file_idx, self._cast_table(SCREAMING_SNAKE_CASE_ )
                                break
                            else:
                                logger.error(f'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}' )
                                raise ValueError(
                                    f'Not able to read records in the JSON file at {file}. '
                                    f'You should probably indicate the field of the JSON file containing your records. '
                                    f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
                                    f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE_ )
                        batch_idx += 1
| 668 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
# Ensure the installed `datasets` version matches what this example was written for.
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Module-level logger for this fine-tuning script.
_UpperCAmelCase : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
    """Data arguments: dataset selection, truncation limits, and local file paths.

    Each ``field`` carries its own ``help`` string. The validator method at the
    bottom checks that local train/validation files are consistent CSV or JSON
    files when no hub dataset name is given.

    NOTE(review): every field is bound to the same placeholder name
    ``UpperCamelCase_`` (obfuscation artefact) — confirm field names upstream.
    """

    UpperCamelCase_ :Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    UpperCamelCase_ :Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
    UpperCamelCase_ :int = field(
        default=1024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    UpperCamelCase_ :bool = field(
        default=snake_case__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    UpperCamelCase_ :bool = field(
        default=snake_case__ , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    UpperCamelCase_ :Optional[int] = field(
        default=snake_case__ , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    UpperCamelCase_ :Optional[int] = field(
        default=snake_case__ , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    UpperCamelCase_ :Optional[int] = field(
        default=snake_case__ , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
    UpperCamelCase_ :Optional[str] = field(
        default=snake_case__ , metadata={'help': 'A csv or a json file containing the training data.'} )
    UpperCamelCase_ :Optional[str] = field(
        default=snake_case__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
    UpperCamelCase_ :Optional[str] = field(default=snake_case__ , metadata={'help': 'A csv or a json file containing the test data.'} )

    def __snake_case ( self : Union[str, Any] ):
        """Validate that either a dataset name or consistent local CSV/JSON files are provided."""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
        else:
            lowerCAmelCase__ = self.train_file.split('''.''' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            lowerCAmelCase__ = self.validation_file.split('''.''' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
    """Model arguments: checkpoint/config/tokenizer paths and download options.

    Each ``field`` carries its own ``help`` string.

    NOTE(review): every field is bound to the same placeholder name
    ``UpperCamelCase_`` (obfuscation artefact) — confirm field names upstream.
    """

    UpperCamelCase_ :str = field(
        default=snake_case__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    UpperCamelCase_ :Optional[str] = field(
        default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    UpperCamelCase_ :Optional[str] = field(
        default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    UpperCamelCase_ :Optional[str] = field(
        default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    UpperCamelCase_ :bool = field(
        default=snake_case__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    UpperCamelCase_ :str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    UpperCamelCase_ :bool = field(
        default=snake_case__ , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
def lowerCAmelCase_ () -> Union[str, Any]:
    """Fine-tune a BART/TAPEX sequence classifier on TabFact-style table entailment.

    Parses CLI arguments (or a single JSON argument file), sets up logging and
    seeding, loads the dataset from the hub or from local CSV/JSON files,
    tokenizes each statement together with its linearized table, then runs the
    Hugging Face ``Trainer`` through train/eval/predict and writes out metrics,
    predictions and a model card.

    NOTE(review): as elsewhere in this file, many assignment targets are the
    placeholder ``lowerCAmelCase__`` while later statements reference the
    original names (``parser``, ``training_args``, ``raw_datasets``, ...);
    as written those names are unbound — confirm against the upstream script.
    """
    lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    lowerCAmelCase__ = training_args.get_process_log_level()
    logger.setLevel(lowercase__ )
    datasets.utils.logging.set_verbosity(lowercase__ )
    transformers.utils.logging.set_verbosity(lowercase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(f'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    lowerCAmelCase__ = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        lowerCAmelCase__ = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        lowerCAmelCase__ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                lowerCAmelCase__ = data_args.train_file.split('''.''' )[-1]
                lowerCAmelCase__ = data_args.test_file.split('''.''' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                lowerCAmelCase__ = data_args.test_file
            else:
                raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}' )
        if data_args.train_file.endswith('''.csv''' ):
            # Loading a dataset from local csv files
            lowerCAmelCase__ = load_dataset('''csv''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            lowerCAmelCase__ = load_dataset('''json''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    lowerCAmelCase__ = raw_datasets['''train'''].features['''label'''].names
    lowerCAmelCase__ = len(lowercase__ )
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCAmelCase__ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    lowerCAmelCase__ = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase__ , )
    lowerCAmelCase__ = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Padding strategy
    if data_args.pad_to_max_length:
        lowerCAmelCase__ = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        lowerCAmelCase__ = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    lowerCAmelCase__ = {'''Refused''': 0, '''Entailed''': 1}
    lowerCAmelCase__ = {0: '''Refused''', 1: '''Entailed'''}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    lowerCAmelCase__ = min(data_args.max_seq_length , tokenizer.model_max_length )

    def preprocess_tabfact_function(lowercase__ : Any ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(lowercase__ : Dict ):
            # Rows are '\n'-separated and cells are '#'-separated in TabFact's table_text.
            lowerCAmelCase__ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
            lowerCAmelCase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd

        lowerCAmelCase__ = examples['''statement''']
        lowerCAmelCase__ = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
        lowerCAmelCase__ = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
        lowerCAmelCase__ = examples['''label''']
        return result

    with training_args.main_process_first(desc='''dataset map pre-processing''' ):
        lowerCAmelCase__ = raw_datasets.map(
            lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        lowerCAmelCase__ = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            lowerCAmelCase__ = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        lowerCAmelCase__ = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            lowerCAmelCase__ = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('''--do_predict requires a test dataset''' )
        lowerCAmelCase__ = raw_datasets['''test''']
        if data_args.max_predict_samples is not None:
            lowerCAmelCase__ = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(lowercase__ ) ) , 3 ):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(lowercase__ : EvalPrediction ):
        lowerCAmelCase__ = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
        lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        lowerCAmelCase__ = default_data_collator
    elif training_args.fpaa:
        lowerCAmelCase__ = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
    else:
        lowerCAmelCase__ = None
    # Initialize our Trainer
    lowerCAmelCase__ = Trainer(
        model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
    # Training
    if training_args.do_train:
        lowerCAmelCase__ = None
        if training_args.resume_from_checkpoint is not None:
            lowerCAmelCase__ = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowerCAmelCase__ = last_checkpoint
        lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowercase__ )
        lowerCAmelCase__ = train_result.metrics
        lowerCAmelCase__ = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
        )
        lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''' , lowercase__ )
        trainer.save_metrics('''train''' , lowercase__ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        lowerCAmelCase__ = trainer.evaluate(eval_dataset=lowercase__ )
        lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
        lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
        trainer.log_metrics('''eval''' , lowercase__ )
        trainer.save_metrics('''eval''' , lowercase__ )
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        lowerCAmelCase__ = predict_dataset.remove_columns('''label''' )
        lowerCAmelCase__ = trainer.predict(lowercase__ , metric_key_prefix='''predict''' ).predictions
        lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
        lowerCAmelCase__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
        if trainer.is_world_process_zero():
            with open(lowercase__ , '''w''' ) as writer:
                logger.info('''***** Predict Results *****''' )
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(lowercase__ ):
                    lowerCAmelCase__ = label_list[item]
                    writer.write(f'{index}\t{item}\n' )
    lowerCAmelCase__ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
    if training_args.push_to_hub:
        trainer.push_to_hub(**lowercase__ )
    else:
        trainer.create_model_card(**lowercase__ )
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Dict:
    """Spawn-friendly entry point (e.g. for TPU launchers); ignores its argument.

    NOTE(review): ``main`` is not defined under that name in this module as
    written (the main routine above was renamed) — confirm against upstream.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this module as
    # written (the main routine above was renamed) — confirm against upstream.
    main()
| 668 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module-level logger.
# NOTE(review): both constants below are bound to the same placeholder name
# ``_UpperCAmelCase`` (obfuscation artefact) — confirm original names upstream.
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)

# Map: pretrained CodeGen checkpoint id -> hosted config.json.
_UpperCAmelCase : List[str] = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class lowerCAmelCase_ ( snake_case__ ):
    """Configuration class for CodeGen-style causal language models.

    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``SCREAMING_SNAKE_CASE_`` (invalid Python — duplicate argument) and every
    value is assigned to the placeholder local ``lowerCAmelCase__`` instead of
    ``self.<attr>``; obfuscation artefacts — confirm against the upstream
    configuration class.
    """

    UpperCamelCase_ :str = 'codegen'
    # Canonical-name aliases consumed by the PretrainedConfig machinery.
    UpperCamelCase_ :Any = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=50_400 , SCREAMING_SNAKE_CASE_ : Dict=2_048 , SCREAMING_SNAKE_CASE_ : List[str]=2_048 , SCREAMING_SNAKE_CASE_ : Any=4_096 , SCREAMING_SNAKE_CASE_ : Optional[int]=28 , SCREAMING_SNAKE_CASE_ : List[Any]=16 , SCREAMING_SNAKE_CASE_ : Any=64 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : int="gelu_new" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : Any=1e-5 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Any=50_256 , SCREAMING_SNAKE_CASE_ : List[Any]=50_256 , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : int , ):
        """Record model hyper-parameters and forward special-token ids to the base class."""
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = n_ctx
        lowerCAmelCase__ = n_positions
        lowerCAmelCase__ = n_embd
        lowerCAmelCase__ = n_layer
        lowerCAmelCase__ = n_head
        lowerCAmelCase__ = n_inner
        lowerCAmelCase__ = rotary_dim
        lowerCAmelCase__ = activation_function
        lowerCAmelCase__ = resid_pdrop
        lowerCAmelCase__ = embd_pdrop
        lowerCAmelCase__ = attn_pdrop
        lowerCAmelCase__ = layer_norm_epsilon
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = use_cache
        lowerCAmelCase__ = bos_token_id
        lowerCAmelCase__ = eos_token_id
        super().__init__(
            bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase_(OnnxConfigWithPast):
    """ONNX export configuration for CodeGen, with optional past-key-values.

    NOTE(review): the obfuscated snapshot declared duplicate parameter names
    (SyntaxError), collapsed five differently-named members onto
    ``__snake_case`` (so they overwrote each other), and referenced undefined
    locals; restored to the ``OnnxConfigWithPast`` member names the exporter
    calls (`inputs`, `num_layers`, `num_attention_heads`,
    `generate_dummy_inputs`, `default_onnx_opset`).
    """

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for each ONNX graph input."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy model inputs (optionally with past key/values) for tracing."""
        # Skip OnnxConfigWithPast's own override so the plain base implementation
        # builds the token inputs; past tensors are appended manually below.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            # Extend the mask so it also covers the synthetic past positions.
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 668 |
def lowerCAmelCase_(number: float, digit_amount: int) -> float:
    """Return the fractional (decimal) part of ``number``.

    Args:
        number: value whose decimal part is isolated.
        digit_amount: when > 0, round the fractional part to this many digits;
            otherwise return it unrounded.

    Returns:
        The fractional part, keeping the sign of ``number``.
    """
    # NOTE(review): the snapshot declared both parameters under the duplicate
    # name `lowercase__` (a SyntaxError) and referenced an undefined `number`;
    # restored distinct parameter names matching the body's intent.
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo of decimal isolation on a few positive, negative, and zero inputs.
    # NOTE(review): the snapshot called the undefined name `decimal_isolate`;
    # the function defined immediately above is `lowerCAmelCase_`.
    print(lowerCAmelCase_(1.53, 0))
    print(lowerCAmelCase_(35.345, 1))
    print(lowerCAmelCase_(35.345, 2))
    print(lowerCAmelCase_(35.345, 3))
    print(lowerCAmelCase_(-14.789, 3))
    print(lowerCAmelCase_(0, 2))
    print(lowerCAmelCase_(-14.123, 1))
    print(lowerCAmelCase_(-14.123, 2))
    print(lowerCAmelCase_(-14.123, 3))
| 668 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the NLLB-MoE configuration below.
_UpperCAmelCase : int = logging.get_logger(__name__)

# Pretrained NLLB-MoE checkpoint -> hosted config file.
# NOTE(review): rebinds the same obfuscated name as the logger — presumably two
# distinct module constants before obfuscation.
_UpperCAmelCase : Tuple = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for an NLLB-MoE encoder-decoder model with expert routing.

    Defaults correspond to the ``facebook/nllb-moe-54b`` checkpoint family.
    """

    # NOTE(review): the obfuscated snapshot bound all three class attributes to
    # one name and subclassed the undefined `snake_case__`; restored to the
    # attributes `PretrainedConfig` (imported above) actually consumes.
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128_112,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """Store the transformer and mixture-of-experts hyper-parameters.

        NOTE(review): the obfuscated snapshot declared every parameter under the
        single name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError) and assigned the
        values to a throw-away local instead of ``self``; restored distinct
        parameters (order inferred from the positional defaults) stored as
        instance attributes, matching the names the body already referenced.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 668 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase_ :
    """Test helper that builds a small Funnel config plus dummy inputs and runs
    shape checks for each TF Funnel head.

    NOTE(review): this obfuscated snapshot is not runnable as-is — every method
    declares all of its parameters under the single duplicated name
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError) and assigns each result to the one
    local ``lowerCAmelCase__`` instead of distinct names / ``self`` attributes,
    so later reads such as ``self.batch_size``, ``parent`` or
    ``config_and_inputs`` are unresolved. Code left byte-identical; restore
    from the upstream test file before use.
    """

    def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=99 , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2] , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : int=37 , SCREAMING_SNAKE_CASE_ : str="gelu_new" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=False , ):
        # Presumably these were `self.<name> = <param>` assignments originally.
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = seq_length
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_input_mask
        lowerCAmelCase__ = use_token_type_ids
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = block_sizes
        lowerCAmelCase__ = num_decoder_layers
        lowerCAmelCase__ = d_model
        lowerCAmelCase__ = n_head
        lowerCAmelCase__ = d_head
        lowerCAmelCase__ = d_inner
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = hidden_dropout
        lowerCAmelCase__ = attention_dropout
        lowerCAmelCase__ = activation_dropout
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = type_vocab_size
        lowerCAmelCase__ = 2
        lowerCAmelCase__ = num_labels
        lowerCAmelCase__ = num_choices
        lowerCAmelCase__ = scope
        lowerCAmelCase__ = initializer_std
        # Used in the tests to check the size of the first attention layer
        lowerCAmelCase__ = n_head
        # Used in the tests to check the size of the first hidden state
        lowerCAmelCase__ = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            lowerCAmelCase__ = self.num_hidden_layers + 2

    def __snake_case ( self : List[str] ):
        # Builds (config, ids, masks, labels); presumably `prepare_config_and_inputs`.
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        if self.use_token_type_ids:
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ):
        # Shape checks for the base TFFunnelModel, also toggling two config flags.
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )

    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
        # Shape checks for TFFunnelBaseModel (pooled output lengths 2/3/2).
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )

    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , ):
        # Pre-training head: logits over (batch, seq_length).
        lowerCAmelCase__ = TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , ):
        # Masked-LM head: logits over (batch, seq_length, vocab_size).
        lowerCAmelCase__ = TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , ):
        # Sequence-classification head: logits over (batch, num_labels).
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        # Multiple-choice head: inputs are tiled per choice; logits (batch, num_choices).
        lowerCAmelCase__ = self.num_choices
        lowerCAmelCase__ = TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , ):
        # Token-classification head: logits over (batch, seq_length, num_labels).
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , ):
        # Question-answering head: start/end logits over (batch, seq_length).
        lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __snake_case ( self : Union[str, Any] ):
        # Presumably `prepare_config_and_inputs_for_common`: unpack then build the
        # shared inputs dict. The obfuscation collapsed the 7-way unpack onto one
        # name, so `config_and_inputs` / `input_ids` etc. are unresolved here.
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) ,
        ) = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Common-model-test suite for the non-base TF Funnel heads.

    NOTE(review): obfuscated snapshot — base classes `snake_case__` are
    undefined (presumably TFModelTesterMixin / PipelineTesterMixin), the two
    boolean class attributes share one name, `TFFunnelModelTester` is not
    defined under that name in this file, and method-local results are all
    bound to the single name ``lowerCAmelCase__``. Code left byte-identical.
    """

    # Model classes under test / pipeline mapping; last two flags presumably
    # `test_head_masking` / `test_onnx` in the upstream file.
    UpperCamelCase_ :Tuple = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase_ :Optional[int] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ :Dict = False
    UpperCamelCase_ :Tuple = False

    def __snake_case ( self : int ):
        # setUp: build the model tester and a config tester.
        lowerCAmelCase__ = TFFunnelModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : str ):
        self.config_tester.run_common_tests()

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Tuple ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@require_tf
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """Common-model-test suite for the base TF Funnel variants (built with
    ``base=True`` on the tester).

    NOTE(review): same obfuscation damage as the class above (undefined base
    class, duplicate class-attribute names, single collapsed local name);
    code left byte-identical.
    """

    UpperCamelCase_ :str = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    UpperCamelCase_ :Optional[Any] = False
    UpperCamelCase_ :Any = False

    def __snake_case ( self : Union[str, Any] ):
        # setUp: base-model variant of the tester.
        lowerCAmelCase__ = TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Any ):
        self.config_tester.run_common_tests()

    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
| 668 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Conditional fixtures/constants for the M2M-100 tokenizer tests below.
# NOTE(review): all three constants are bound to the same obfuscated name, so
# the sentencepiece fixture path is lost; presumably these were SAMPLE_VOCAB,
# EN_CODE and FR_CODE originally (the test classes below read EN_CODE/FR_CODE,
# which are undefined under these names).
if is_sentencepiece_available():
    _UpperCAmelCase : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_UpperCAmelCase : List[Any] = 128_022
_UpperCAmelCase : Dict = 128_028
@require_sentencepiece
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """Unit tests for MaMaaaTokenizer (M2M-100) built on a tiny sentencepiece
    fixture vocabulary.

    NOTE(review): obfuscated snapshot — the base class `snake_case__` is
    undefined (presumably TokenizerTesterMixin), two class attributes share one
    name, and method-local results are all bound to ``lowerCAmelCase__`` /
    ``SCREAMING_SNAKE_CASE_``, so names such as ``save_dir`` and ``tokenizer``
    are unresolved. Code left byte-identical.
    """

    UpperCamelCase_ :Optional[int] = MaMaaaTokenizer
    UpperCamelCase_ :List[str] = False
    UpperCamelCase_ :Optional[int] = False
    UpperCamelCase_ :int = True

    def __snake_case ( self : Dict ):
        # setUp: write a tiny vocab + spm model into tmpdir and round-trip save.
        super().setUp()
        lowerCAmelCase__ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        lowerCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        lowerCAmelCase__ = Path(self.tmpdirname )
        save_json(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SCREAMING_SNAKE_CASE_ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        lowerCAmelCase__ = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def __snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        # Factory used by the mixin to obtain a tokenizer instance.
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : int ):
        # (input text, expected decoded text) pair for the mixin.
        return (
            "This is a test",
            "This is a test",
        )

    def __snake_case ( self : List[str] ):
        # id 0 is the "</s>" token in the fixture vocab.
        lowerCAmelCase__ = '''</s>'''
        lowerCAmelCase__ = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] ):
        # Vocab ordering / size sanity checks.
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )

    @unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def __snake_case ( self : Any ):
        pass

    def __snake_case ( self : Dict ):
        # tokenize -> ids -> tokens -> string round trip.
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [2, 3, 4, 5, 6] , )
        lowerCAmelCase__ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        lowerCAmelCase__ = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , '''This is a test''' )

    @slow
    def __snake_case ( self : List[str] ):
        # Integration check against a pinned facebook/m2m100_418M revision.
        # fmt: off
        lowerCAmelCase__ = {'''input_ids''': [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCamelCase_ :Dict = 'facebook/m2m100_418M'
UpperCamelCase_ :List[str] = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
UpperCamelCase_ :Optional[Any] = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
UpperCamelCase_ :Dict = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
@classmethod
def __snake_case ( cls : Union[str, Any] ):
lowerCAmelCase__ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
lowerCAmelCase__ = 1
return cls
def __snake_case ( self : List[str] ):
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 128_006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 128_022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 128_076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 128_063 )
def __snake_case ( self : Any ):
lowerCAmelCase__ = self.tokenizer.get_vocab()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] , 3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = '''en'''
lowerCAmelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[Any] ):
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
# fmt: off
lowerCAmelCase__ = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
# fmt: on
lowerCAmelCase__ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = MaMaaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.lang_token_to_id , SCREAMING_SNAKE_CASE_ )
@require_torch
def __snake_case ( self : List[Any] ):
    # The encoded batch must follow the fairseq layout: language code first,
    # EOS last, and decoder inputs are the labels shifted right.
    lowerCAmelCase__ = '''en'''
    lowerCAmelCase__ = '''fr'''
    # NOTE(review): the two locals above are throwaway; presumably
    # `self.tokenizer.src_lang = "en"` / `self.tokenizer.tgt_lang = "fr"`,
    # and `batch` below is the tokenizer output. TODO confirm.
    lowerCAmelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
    lowerCAmelCase__ = shift_tokens_right(
        batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
    for k in batch:
        lowerCAmelCase__ = batch[k].tolist()
    # batch = {k: v.tolist() for k,v in batch.items()}
    # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
    # batch.decoder_inputs_ids[0][0] ==
    assert batch.input_ids[1][0] == EN_CODE
    assert batch.input_ids[1][-1] == 2
    assert batch.labels[1][0] == FR_CODE
    assert batch.labels[1][-1] == 2
    assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __snake_case ( self : Optional[int] ):
    # Changing the target language must update the prefix (language code)
    # while the suffix stays EOS.
    lowerCAmelCase__ = '''mr'''
    # NOTE(review): throwaway local; presumably `self.tokenizer.tgt_lang = "mr"`
    # (and "zh" below) — otherwise these assertions test nothing new. TODO confirm.
    self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
    self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    lowerCAmelCase__ = '''zh'''
    self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
    self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __snake_case ( self : List[Any] ):
    # Switching between input and target modes must toggle the prefix between
    # the source and target language codes.
    lowerCAmelCase__ = '''mr'''
    # NOTE(review): throwaway local; presumably `self.tokenizer.tgt_lang = "mr"`
    # (and "zh" below). TODO confirm.
    self.tokenizer._switch_to_target_mode()
    self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
    self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    self.tokenizer._switch_to_input_mode()
    self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
    lowerCAmelCase__ = '''zh'''
    self.tokenizer._switch_to_target_mode()
    self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
    self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    self.tokenizer._switch_to_input_mode()
    self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __snake_case ( self : str ):
    # _build_translation_inputs must append EOS to the source ids and set the
    # forced BOS token to the target-language code.
    lowerCAmelCase__ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
    # NOTE(review): `SCREAMING_SNAKE_CASE_` is undefined; presumably the inputs
    # bound to the throwaway local above. TODO confirm.
    self.assertEqual(
        nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
            # en_XX, A, test, EOS
            '''input_ids''': [[128_022, 58, 4_183, 2]],
            '''attention_mask''': [[1, 1, 1, 1]],
            # ar_AR
            '''forced_bos_token_id''': 128_006,
        } , )
| 668 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases used throughout this module.
# NOTE(review): all three values are bound to the same throwaway name; they
# were presumably `FeatureDict`, `ModelOutput` and `PICO_TO_ANGSTROM`, which
# later code references. TODO confirm.
_UpperCAmelCase : int = Mapping[str, np.ndarray]
_UpperCAmelCase : Optional[Any] = Mapping[str, Any]  # Is a nested dict.
# ProteinNet stores coordinates in picometers; multiply by 0.01 for angstroms.
_UpperCAmelCase : Optional[Any] = 0.01
@dataclasses.dataclass(frozen=snake_case__ )
class lowerCAmelCase_ :
    """Protein structure representation (atom coordinates, residue types,
    masks, B-factors and optional multi-chain / provenance metadata).

    NOTE(review): `snake_case__` in the decorator is not defined in this file —
    presumably `frozen=True`. Also, every field below shares the same name, so
    as written the dataclass would collapse them into a single field; the
    trailing comments record the intended per-field meanings. TODO confirm
    against the original module.
    """

    UpperCamelCase_ :np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    UpperCamelCase_ :np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    UpperCamelCase_ :np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    UpperCamelCase_ :np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    UpperCamelCase_ :np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    UpperCamelCase_ :Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    UpperCamelCase_ :Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    UpperCamelCase_ :Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    UpperCamelCase_ :Optional[Sequence[int]] = None
def lowerCAmelCase_ (lowercase__ : str ) -> "Protein":
    """Parse a ProteinNet-format string into a `Protein`.

    The string consists of `[TAG]` headers ([PRIMARY], [TERTIARY], [MASK])
    each followed by their payload lines. Only the backbone atoms N, CA and C
    are populated.

    Args:
        lowercase__: the full ProteinNet record as one string.

    Returns:
        A `Protein` with positions in angstroms and per-atom masks.

    Raises:
        AssertionError: if the record contains no [PRIMARY] section.
    """
    # The degraded original bound the split results, sequence, tertiary arrays
    # etc. to a single throwaway local and never wrote into the output arrays;
    # this restores the intended dataflow.
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re , lowercase__ ) if len(tag ) > 0]
    groups = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Unknown residue symbols fall through to restype_num ('X') via the
            # .get default below, so no in-place string mutation is needed.
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            # Three lines of whitespace-separated floats: x, y, z coordinates,
            # three values per residue (N, CA, C), in picometers.
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # '-' means unresolved residue, '+' means resolved.
            mask = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions ,
        atom_mask=atom_mask ,
        aatype=aatype ,
        residue_index=np.arange(len(aatype ) ) ,
        b_factors=None ,
    )
def lowerCAmelCase_ (prot : "Protein" , chain_id : int = 0 ) -> List[str]:
    """Collect PDB header lines (REMARK / PARENT) for one chain of *prot*.

    Args:
        prot: the protein whose metadata is emitted.
        chain_id: which chain's parents to include when per-chain parent
            indices are available.

    Returns:
        A list of header lines; PARENT is always present ("N/A" if unknown).
    """
    # The original signature reused the same parameter name twice, which is a
    # SyntaxError in Python; names restored from the body's usage.
    pdb_headers = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f'REMARK {remark}' )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parents = [p for i, p in zip(parents_chain_index , parents ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['''N/A''']
    pdb_headers.append(f'PARENT {" ".join(parents )}' )
    return pdb_headers
def lowerCAmelCase_ (prot : "Protein" , pdb_str : str ) -> str:
    """Insert REMARK/PARENT header lines from *prot* into an existing PDB string.

    A PARENT line is emitted before the first chain and after every TER that is
    not immediately followed by END; pre-existing PARENT/REMARK lines in
    *pdb_str* are dropped.

    Args:
        prot: the protein providing remark / parent metadata.
        pdb_str: a PDB-format string (newline separated).

    Returns:
        The PDB string with headers merged in.
    """
    # The original signature reused the same parameter name twice (SyntaxError)
    # and never bound `lines`, `parent_dict`, `max_idx`, `chain_counter`;
    # names restored from the body's usage.
    out_pdb_lines = []
    lines = pdb_str.split('''\n''' )
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f'REMARK {remark}' )
    if prot.parents is not None and len(prot.parents ) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index, filling gaps with "N/A".
            parent_dict = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) , [] )
                parent_dict[str(i )].append(p )
            max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                chain_parents = parent_dict.get(str(i ) , ['''N/A'''] )
                parents_per_chain.append(chain_parents )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        parents_per_chain = [['''N/A''']]

    def make_parent_line(p : Sequence[str] ) -> str:
        # Render one PARENT header line.
        return f'PARENT {" ".join(p )}'

    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    chain_counter = 0
    for i, l in enumerate(lines ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended; emit the next chain's PARENT header.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['''N/A''']
            out_pdb_lines.append(make_parent_line(chain_parents ) )
    return "\n".join(out_pdb_lines )
def lowerCAmelCase_ (prot : "Protein" ) -> str:
    """Convert a `Protein` instance to a PDB-format string.

    Args:
        prot: the protein to render.

    Returns:
        A PDB string with ATOM records, TER chain terminators and END.

    Raises:
        ValueError: if any aatype index exceeds the known residue types.
    """
    # The degraded original used `lowercase__` as the parameter but referenced
    # `prot`, `r`, `atom_types`, `n`, ... that were never bound, and used the
    # nonexistent `np.intaa`; dataflow restored.
    restypes = residue_constants.restypes + ['''X''']

    def res_atoa(r : int ) -> str:
        # One-letter residue index -> three-letter PDB residue name ("UNK" if unknown).
        return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )

    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32 )
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError('''Invalid aatypes.''' )
    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n ):
        res_name_a = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            record_type = '''ATOM'''
            name = atom_name if len(atom_name ) == 4 else f' {atom_name}'
            alt_loc = ''''''
            insertion_code = ''''''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''''''
            chain_tag = '''A'''
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                f'{res_name_a:>3} {chain_tag:>1}'
                f'{residue_index[i]:>4}{insertion_code:>1}   '
                f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                f'{occupancy:>6.2f}{b_factor:>6.2f}          '
                f'{element:>2}{charge:>2}'
            )
            pdb_lines.append(atom_line )
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = '''TER'''
            chain_termination_line = (
                f'{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(chain_termination_line )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )
    pdb_lines.append('''END''' )
    pdb_lines.append('''''' )
    return "\n".join(pdb_lines )
def lowerCAmelCase_ (prot : "Protein" ) -> np.ndarray:
    """Return the ideal per-residue atom mask for *prot*.

    `Protein.atom_mask` marks atoms actually resolved in the structure; this
    instead marks every atom that *should* exist for each residue type.
    """
    # Bug fix: the parameter was named `lowercase__` while the body read the
    # undefined name `prot` (NameError on every call).
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCAmelCase_ (
    features : "FeatureDict" ,
    result : "ModelOutput" ,
    b_factors : "Optional[np.ndarray]" = None ,
    chain_index : "Optional[np.ndarray]" = None ,
    remark : "Optional[str]" = None ,
    parents : "Optional[Sequence[str]]" = None ,
    parents_chain_index : "Optional[Sequence[int]]" = None ,
) -> "Protein":
    """Assemble a `Protein` from model *features* and a prediction *result*.

    Args:
        features: input feature dict; must contain 'aatype' and 'residue_index'.
        result: model output dict with 'final_atom_positions' and 'final_atom_mask'.
        b_factors: optional per-atom B-factors (zeros if omitted).
        chain_index / remark / parents / parents_chain_index: optional metadata
            forwarded to the `Protein` constructor.

    Returns:
        The assembled `Protein`.
    """
    # The original declared all seven parameters with the same name, which is a
    # SyntaxError; names restored from the body's keyword usage.
    return Protein(
        aatype=features['''aatype'''] ,
        atom_positions=result['''final_atom_positions'''] ,
        atom_mask=result['''final_atom_mask'''] ,
        residue_index=features['''residue_index'''] + 1 ,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) ,
        chain_index=chain_index ,
        remark=remark ,
        parents=parents ,
        parents_chain_index=parents_chain_index ,
    )
| 668 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """Tokenizer tests for XLMProphetNetTokenizer using a small SentencePiece fixture.

    NOTE(review): the base name `snake_case__` is undefined in this file —
    presumably `TokenizerTesterMixin` — and throughout this class results are
    bound to the throwaway local `lowerCAmelCase__` and then read back under
    their intended names (`tokenizer`, `vocab_keys`, ...) or via the undefined
    `SCREAMING_SNAKE_CASE_` (presumably the fixture path / the bound result /
    boolean flags). Confirm against the original test module.
    """

    # Tokenizer class under test; no rust counterpart; requires sentencepiece.
    UpperCamelCase_ :str = XLMProphetNetTokenizer
    UpperCamelCase_ :Dict = False
    UpperCamelCase_ :Union[str, Any] = True

    def __snake_case ( self : Optional[int] ):
        # Build a tokenizer from the SentencePiece fixture and save it where the
        # mixin helpers (get_tokenizer) expect to find it.
        super().setUp()

        # We have a SentencePiece fixture for testing
        lowerCAmelCase__ = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def __snake_case ( self : Optional[Any] ):
        # '[PAD]' must map to id 0 and back.
        lowerCAmelCase__ = '''[PAD]'''
        lowerCAmelCase__ = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        # Sanity-check the fixture vocabulary: first/last entries and its size.
        lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''[PAD]''' )
        self.assertEqual(vocab_keys[1] , '''[CLS]''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_012 )

    def __snake_case ( self : Tuple ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )

    def __snake_case ( self : int ):
        # Full tokenize -> ids -> tokens round trip on accented text; pieces
        # missing from the fixture ('9', 'é') must come back as '[UNK]'.
        lowerCAmelCase__ = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ] , )
        lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''[UNK]''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''[UNK]''',
                '''.''',
            ] , )

    @cached_property
    def __snake_case ( self : List[Any] ):
        # The real pretrained tokenizer, used by the slow integration tests.
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )

    @slow
    def __snake_case ( self : List[Any] ):
        # Encoding a trivial string with the pretrained tokenizer must be stable.
        lowerCAmelCase__ = '''Hello World!'''
        lowerCAmelCase__ = [35_389, 6_672, 49, 2]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )

    @slow
    def __snake_case ( self : Union[str, Any] ):
        # Pinned integration encoding for a fixed checkpoint revision.
        # fmt: off
        lowerCAmelCase__ = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 668 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Bug fix: the path was assigned to a throwaway name while the insert below
# read the undefined `git_repo_path` (NameError at import time).
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCAmelCase_ (config : Any ) -> None:
    """pytest_configure hook: register the custom markers used across the suite.

    Bug fix: the parameter was named `lowercase__` while the body called methods
    on the undefined name `config` (NameError on every pytest run).
    """
    custom_markers = (
        "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested",
        "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested",
        "is_pipeline_test: mark test to run only when pipelines are tested",
        "is_staging_test: mark test to run only in the staging environment",
        "accelerate_tests: mark test that require accelerate",
        "tool_tests: mark the tool tests that are run on their specific schedule",
    )
    for marker in custom_markers:
        config.addinivalue_line("markers", marker)
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Optional[int]:
    """pytest_addoption hook: register transformers' shared command-line
    options (e.g. --make-reports) on *lowercase__*, the pytest parser.
    """
    # Imported lazily so importing this conftest never requires transformers
    # unless pytest actually runs.
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(lowercase__ )
def lowerCAmelCase_ (terminalreporter : Any ) -> None:
    """pytest_terminal_summary hook: if --make-reports was passed, write the
    detailed per-suite report files.

    Bug fix: the parameter was named `lowercase__` while the body read the
    undefined names `terminalreporter` and `make_reports` (NameError).
    """
    # Imported lazily so the conftest stays importable without transformers.
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def lowerCAmelCase_ (session : Any , exitstatus : int ) -> None:
    """pytest_sessionfinish hook: treat "no tests collected" (exit status 5)
    as success.

    Bug fix: the original declared both parameters with the same name (a
    SyntaxError) and assigned the override to a dead local instead of writing
    `session.exitstatus`.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# Bug fix: both values were bound to a throwaway name while the subclass below
# reads `IGNORE_RESULT` and `OutputChecker` (NameError at import time).
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

# Stock doctest checker; the custom subclass below delegates the normal
# comparison to it.
OutputChecker = doctest.OutputChecker
class lowerCAmelCase_ ( snake_case__ ):
    """Doctest output checker that accepts any output when the custom
    IGNORE_RESULT option flag is set on an example.

    NOTE(review): the base `snake_case__` is undefined in this file — it is
    presumably `doctest.OutputChecker`; for doctest to actually invoke this
    override the method would also need to be named `check_output`. TODO
    confirm against the original conftest.
    """

    def __snake_case ( self , want , got , optionflags ):
        # Bug fix: the original declared all three non-self parameters with the
        # same name (a SyntaxError); names restored from the delegation below.
        if IGNORE_RESULT & optionflags:
            # The example opted out of result checking: treat any output as a match.
            return True
        # Otherwise fall back to the standard doctest comparison.
        return OutputChecker.check_output(self , want , got , optionflags )
# Monkeypatch doctest and pytest's doctest plugin so the custom checker,
# module collector and parser defined/imported above are used.
# Bug fix: the originals assigned the undefined name `CustomOutputChecker` (the
# checker class above is named `lowerCAmelCase_`) to throwaway locals, so the
# patches never took effect; targets restored per the standard transformers
# conftest layout. NOTE(review): confirm the intended patch targets.
doctest.OutputChecker = lowerCAmelCase_
_pytest.doctest.DoctestModule = HfDoctestModule
_pytest.doctest.DoctestParser = HfDocTestParser
| 668 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
    """Fast (CPU-sized) tests for AltDiffusionImgaImgPipeline using tiny dummy
    UNet/VAE/text-encoder components.

    NOTE(review): throughout this class results are bound to the throwaway
    local `lowerCAmelCase__` and then read back under their intended names
    (`unet`, `vae`, `bert`, `alt_pipe`, `init_image`, `prompt`, `image`, ...),
    and several calls pass the undefined `SCREAMING_SNAKE_CASE_` (presumably
    `torch_device`, booleans, or previously bound values). The `extract`
    helper also declares `*args`/`**kwargs` with the same name, which is a
    SyntaxError. Confirm against the original test module.
    """

    def __snake_case ( self : List[str] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def __snake_case ( self : Any ):
        # A deterministic dummy 1x3x32x32 input image tensor.
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = 3
        lowerCAmelCase__ = (32, 32)
        lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
        return image

    @property
    def __snake_case ( self : List[Any] ):
        # Tiny conditional UNet with a 32-dim cross-attention space.
        torch.manual_seed(0 )
        lowerCAmelCase__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        return model

    @property
    def __snake_case ( self : List[Any] ):
        # Tiny VAE matching the UNet's 4 latent channels.
        torch.manual_seed(0 )
        lowerCAmelCase__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model

    @property
    def __snake_case ( self : Optional[int] ):
        # Tiny Roberta-series text encoder (AltDiffusion's conditioning model).
        torch.manual_seed(0 )
        lowerCAmelCase__ = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
        return RobertaSeriesModelWithTransformation(SCREAMING_SNAKE_CASE_ )

    @property
    def __snake_case ( self : int ):
        # Stand-in feature extractor: returns an object with empty pixel_values.
        def extract(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Tuple ):
            class lowerCAmelCase_ :
                def __init__( self : int ):
                    self.pixel_values = torch.ones([0] )

                def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ):
                    self.pixel_values.to(SCREAMING_SNAKE_CASE_ )
                    return self

            return Out()

        return extract

    def __snake_case ( self : Any ):
        # End-to-end img2img on CPU: output shape and a pinned pixel slice,
        # with and without return_dict.
        lowerCAmelCase__ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ = self.dummy_cond_unet
        lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = self.dummy_vae
        lowerCAmelCase__ = self.dummy_text_encoder
        lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        lowerCAmelCase__ = 77
        lowerCAmelCase__ = self.dummy_image.to(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        lowerCAmelCase__ = AltDiffusionImgaImgPipeline(
            unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , )
        lowerCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
        alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
        lowerCAmelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        lowerCAmelCase__ = alt_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=SCREAMING_SNAKE_CASE_ , )
        lowerCAmelCase__ = output.images
        lowerCAmelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        lowerCAmelCase__ = alt_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
        lowerCAmelCase__ = image[0, -3:, -3:, -1]
        lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def __snake_case ( self : int ):
        # Same pipeline assembled in fp16 must still produce a 32x32 image.
        lowerCAmelCase__ = self.dummy_cond_unet
        lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = self.dummy_vae
        lowerCAmelCase__ = self.dummy_text_encoder
        lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        lowerCAmelCase__ = 77
        lowerCAmelCase__ = self.dummy_image.to(SCREAMING_SNAKE_CASE_ )
        # put models in fp16
        lowerCAmelCase__ = unet.half()
        lowerCAmelCase__ = vae.half()
        lowerCAmelCase__ = bert.half()
        # make sure here that pndm scheduler skips prk
        lowerCAmelCase__ = AltDiffusionImgaImgPipeline(
            unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , )
        lowerCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
        alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = alt_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='''np''' , image=SCREAMING_SNAKE_CASE_ , ).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def __snake_case ( self : Optional[int] ):
        # The full BAAI/AltDiffusion checkpoint must handle input resolutions
        # divisible by 8 but not by 16/32, with a pinned output slice.
        lowerCAmelCase__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        lowerCAmelCase__ = init_image.resize((760, 504) )
        lowerCAmelCase__ = '''BAAI/AltDiffusion'''
        lowerCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        lowerCAmelCase__ = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = pipe(
            prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , strength=0.75 , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
        lowerCAmelCase__ = output.images[0]
        lowerCAmelCase__ = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        lowerCAmelCase__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration test for AltDiffusion img2img against a stored
    reference image.

    NOTE(review): as elsewhere in this file, results are bound to the
    throwaway local `lowerCAmelCase__` and then read back under their
    intended names (`init_image`, `pipe`, `prompt`, `output`, `image`,
    `expected_image`), and `SCREAMING_SNAKE_CASE_` is undefined. Confirm
    against the original test module.
    """

    def __snake_case ( self : Tuple ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Tuple ):
        lowerCAmelCase__ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        lowerCAmelCase__ = init_image.resize((768, 512) )
        lowerCAmelCase__ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
        lowerCAmelCase__ = '''BAAI/AltDiffusion'''
        lowerCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        lowerCAmelCase__ = '''A fantasy landscape, trending on artstation'''
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = pipe(
            prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , strength=0.75 , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
        lowerCAmelCase__ = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1e-2
| 668 |
def lowerCAmelCase_ (lowercase__ : list ) -> list:
    """Sort *lowercase__* in place with odd-even transposition sort and return it.

    Runs len(lowercase__) passes: even-numbered passes compare adjacent pairs
    starting at index 0, odd-numbered passes start at index 1. O(n^2) overall.
    Handles empty and single-element lists trivially (zero or one pass).
    """
    # Idiom fix: the original iterated `for _ in range(...)` and then *read*
    # the throwaway name `_` to pick the phase; give the pass counter a real
    # name and hoist the length.
    arr_size = len(lowercase__ )
    for pass_num in range(arr_size ):
        # Alternate between even-indexed and odd-indexed adjacent pairs.
        for i in range(pass_num % 2 , arr_size - 1 , 2 ):
            if lowercase__[i + 1] < lowercase__[i]:
                # Swap out-of-order neighbours.
                lowercase__[i], lowercase__[i + 1] = lowercase__[i + 1], lowercase__[i]
    return lowercase__
if __name__ == "__main__":
    # Quick demo / smoke test for the odd-even transposition sort above.
    # Bug fix: the list was assigned to a throwaway name while the print
    # referenced the undefined `arr`, and `odd_even_transposition` does not
    # exist in this file (the sort is named `lowerCAmelCase_`).
    arr = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {lowerCAmelCase_(arr)}''')
| 668 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
    """Helper that builds a tiny BlipTextConfig plus random inputs for the
    TFBlipTextModel tests below.

    NOTE(review): `__init__` declares all of its non-self parameters with the
    same name (`SCREAMING_SNAKE_CASE_`), which is a SyntaxError, and every
    assignment targets the throwaway local `lowerCAmelCase__` instead of
    `self.<attr>`; the intended attribute names (batch_size, seq_length, ...)
    can be read from the usages further down. Confirm against the original
    test module.
    """

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=7 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str=99 , SCREAMING_SNAKE_CASE_ : Dict=32 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=4 , SCREAMING_SNAKE_CASE_ : int=37 , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=512 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ):
        # Intended attributes (in order): parent, batch_size, seq_length,
        # is_training, use_input_mask, use_labels, vocab_size, hidden_size,
        # projection_dim, num_hidden_layers, num_attention_heads,
        # intermediate_size, dropout, attention_dropout,
        # max_position_embeddings, initializer_range, scope, bos_token_id.
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = seq_length
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_input_mask
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = hidden_size
        lowerCAmelCase__ = projection_dim
        lowerCAmelCase__ = num_hidden_layers
        lowerCAmelCase__ = num_attention_heads
        lowerCAmelCase__ = intermediate_size
        lowerCAmelCase__ = dropout
        lowerCAmelCase__ = attention_dropout
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = scope
        lowerCAmelCase__ = bos_token_id

    def __snake_case ( self : Union[str, Any] ):
        # Build random input ids plus an optional attention mask whose padding
        # start index is randomized per batch row.
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            lowerCAmelCase__ = input_mask.numpy()
            lowerCAmelCase__ , lowerCAmelCase__ = input_mask.shape
            lowerCAmelCase__ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE_ ):
                lowerCAmelCase__ = 1
                lowerCAmelCase__ = 0
        lowerCAmelCase__ = self.get_config()
        return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Tuple ):
        # Tiny config mirroring this tester's hyperparameters.
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )

    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
        # NOTE(review): the three non-self parameters again share one name
        # (SyntaxError); from the body they are config, input_ids, input_mask.
        lowerCAmelCase__ = TFBlipTextModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __snake_case ( self : Any ):
        # Package (config, inputs) into the dict shape the common test mixin expects.
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common model tests for TFBlipTextModel, driven by BlipTextModelTester.

    NOTE(review): class/attribute/method names were clobbered in the original;
    they are reconstructed to the conventional transformers test names — the
    boolean flag names should be confirmed against TFModelTesterMixin.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # Blip checkpoints legitimately miss some keys when converted PT<->TF.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 668 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    """Builds a small DistilBERT config plus dummy PyTorch tensors for the tests.

    NOTE(review): assignment targets and method names were clobbered in the
    original; restored from the ``self.*`` reads in the bodies and from the
    test-class call sites (``create_and_check_distilbert_*``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a config plus ids/mask/label tensors for every task head."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small DistilBertConfig built from this tester's attributes."""
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) -> (batch, num_choices, seq) for the MC head.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the common tester mixin: config + inputs dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model + pipeline tests for the PyTorch DistilBERT classes.

    NOTE(review): class/attribute/method names were clobbered in the original;
    reconstructed from the imported mixins and tester call sites — confirm the
    boolean flag names against ModelTesterMixin.
    """

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, reload onto the test device, and run the traced model."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntergrationTest(unittest.TestCase):
    """Slow integration test against the released distilbert-base-uncased weights."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice recorded from a known-good run of the checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 668 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds a small Funnel config plus dummy TF inputs for the model tests.

    NOTE(review): assignment targets and method names were clobbered in the
    original; restored from the ``self.*`` reads, the call sites in the two
    test classes below, and the in-body comments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        """Return a config plus ids/mask/type-ids/label tensors for every head."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        # NOTE(review): the two config flags below were clobbered in the
        # original; ``truncate_seq`` then ``separate_cls`` match the upstream
        # Funnel test — confirm against FunnelConfig.
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        # Expand (batch, seq) -> (batch, num_choices, seq) for the MC head.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the common tester mixin: config + inputs dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model + pipeline tests for the TF Funnel classes (full model).

    NOTE(review): class/attribute/method names were clobbered in the original;
    reconstructed from the imported mixins and the tester call sites.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common model tests for the "base" (encoder-only) TF Funnel classes.

    NOTE(review): class/attribute/method names were clobbered in the original;
    reconstructed from the imported mixin and the tester call sites.
    """

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # base=True: the tester configures shapes for the encoder-only model.
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 668 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states (Viterbi algorithm).

    NOTE(review): the original block's assignment targets (``probabilities``,
    ``pointers``, loop variables) were clobbered to throw-away names while the
    right-hand sides still read the real names; this restores them. The public
    name was also unusable (every function in the module shared one name).

    >>> observations = ["normal", "cold", "dizzy"]
    >>> states = ["Healthy", "Fever"]
    >>> start_p = {"Healthy": 0.6, "Fever": 0.4}
    >>> trans_p = {
    ...     "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    ...     "Fever": {"Healthy": 0.4, "Fever": 0.6},
    ... }
    >>> emit_p = {
    ...     "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    ...     "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    ... }
    >>> viterbi(observations, states, start_p, trans_p, emit_p)
    ['Healthy', 'Healthy', 'Fever']
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all viterbi() arguments; raise ValueError on the first problem.

    NOTE(review): the original def reused one obfuscated name for every
    parameter (a SyntaxError); names restored from the viterbi() call site.
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _validate_lists(observations_space: list, states_space: list) -> None:
    """Validate that both spaces are lists of strings.

    Name restored from the call site (`_validate_lists`).
    """
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str ) -> None:
'''simple docstring'''
if not isinstance(_object , lowercase__ ):
lowerCAmelCase__ = f'{var_name} must be a list'
raise ValueError(lowercase__ )
else:
for x in _object:
if not isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase__ = f'{var_name} must be a list of strings'
raise ValueError(lowercase__ )
def _validate_dicts(
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> None:
    """Validate the three probability tables.

    Name restored from the call site (`_validate_dicts`). The initial table
    maps str -> float; the other two are nested str -> {str -> float}.
    """
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Validate a dict whose values are themselves dicts of str -> float.

    Name restored from the call site (`_validate_nested_dict`).
    """
    # Outer level: values must be dicts; inner level: values must be floats.
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str , lowercase__ : type , lowercase__ : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , lowercase__ ):
lowerCAmelCase__ = f'{var_name} must be a dict'
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object ):
lowerCAmelCase__ = f'{var_name} all keys must be strings'
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object.values() ):
lowerCAmelCase__ = '''nested dictionary ''' if nested else ''''''
lowerCAmelCase__ = f'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(lowercase__ )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import registry: submodule name -> public names exported from it.
# The mangled original rebound every result to `_UpperCAmelCase`, leaving
# `_import_structure` (used below) undefined and never installing the lazy
# module — restored to the standard transformers lazy-init pattern.
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

# Each optional backend only registers its submodule when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see direct imports; at runtime the lazy module
    # below resolves attributes on first access instead.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase_(SequenceFeatureExtractor):
    """Audio feature extractor producing log-mel spectrogram patches.

    Returns `audio_values` (padded batch of spectrograms) and `audio_mask`
    (1 for real patches, 0 for padding). Base class restored to
    `SequenceFeatureExtractor` (the file's import); the mangled base
    `snake_case__` was undefined.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        # Parameter names restored from the attribute assignments below; the
        # mangled signature reused one name eight times (a SyntaxError).
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Compute a normalized log-mel spectrogram for one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        # Drop the last frame, then rescale dB values into roughly [-1, 1].
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or several mono waveforms into padded spectrograms.

        Args:
            raw_speech: a single waveform or a batch of waveforms.
            return_tensors: optional tensor framework for the output.
            return_attention_mask: also return the patch-level `audio_mask`.
            sampling_rate: sampling rate of `raw_speech`; must match the
                extractor's configured rate when provided.

        Raises:
            ValueError: on sampling-rate mismatch or multi-channel input.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # Bug fix: the mangled original discarded the copy instead of
            # writing the feature into the padded buffer.
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 668 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step *scheduler* `num_steps` times and collect the observed LR values.

    Name restored from the call site (`unwrap_schedule`); the mangled
    signature reused one parameter name, which does not parse.
    """
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Like `unwrap_schedule`, but round-trip the scheduler state through
    `torch.save`/`torch.load` halfway through, to verify serialization does
    not change the schedule. Name restored from the call site.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class lowerCAmelCase_(unittest.TestCase):
    """Optimizer convergence tests for AdamW and Adafactor.

    Parameter names and literal values (`requires_grad=True`, `beta1=None`,
    `relative_step=False`, ...) restored from the body; the mangled version
    reused one parameter name and referenced undefined placeholders.
    """

    def assertListAlmostEqual(self, list1, list2, tol):
        # Element-wise almost-equal with an absolute tolerance.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class lowerCAmelCase_(unittest.TestCase):
    """Checks every LR schedule factory against known expected LR sequences,
    including behavior after a save/reload round trip.

    Attribute names `m`, `optimizer`, `num_steps` restored from their uses in
    the body; the mangled version bound all three to one throwaway name.
    """

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        # Element-wise almost-equal with an absolute tolerance and a message.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Picklable callable wrapper for a schedule's lr lambdas.

    Class and method names restored from the call site
    (`LambdaScheduleWrapper.wrap_scheduler`).
    """

    def __init__(self, fn):
        # The original lr lambda to delegate to.
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        """Replace every lr lambda on *scheduler* with a wrapped copy.

        Bug fix: the mangled original bound the wrapped lambdas to a
        throwaway local instead of writing them back onto the scheduler.
        """
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 668 |
from collections import namedtuple

# Conversion record: multiply a value by `from_` to express it in cubic
# meters, and multiply cubic meters by `to` to express them in the unit.
# Names restored: the mangled original bound both the namedtuple and the
# table to `_UpperCAmelCase`, leaving `from_to` and `METRIC_CONVERSION`
# (used below) undefined.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def lowerCAmelCase_(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume from one metric unit to another.

    Args:
        value: the quantity to convert.
        from_type: source unit, a key of METRIC_CONVERSION.
        to_type: target unit, a key of METRIC_CONVERSION.

    Raises:
        ValueError: when either unit is not a supported key.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            # Bug fix: list the supported unit names, not the characters of
            # the argument string as the mangled original did.
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Route through cubic meters: value -> m^3 -> target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
# Execute the module's doctests when run directly.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 668 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_(DiffusionPipeline):
    """Unconditional audio-generation pipeline (UNet + scheduler denoising
    loop). Base class restored to `DiffusionPipeline` (the file's import);
    local names restored from their uses — the mangled version bound every
    intermediate to one throwaway name and did not parse.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """Generate `batch_size` audio clips of `audio_length_in_s` seconds.

        Returns an `AudioPipelineOutput` (or a 1-tuple when
        `return_dict=False`).
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the resolution once per up-block; the sample must
        # be divisible by the total downscale factor.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim the padding added to satisfy the downscale-factor constraint.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
| 668 |
def binary_insertion_sort(collection: list) -> list:
    """Sort *collection* in place with binary insertion sort and return it.

    Name restored from the call site (`binary_insertion_sort`). The mangled
    version collapsed `val`, `low` and `high` into one variable, which broke
    the algorithm entirely.

    >>> binary_insertion_sort([0, 4, 1234, 4, 1])
    [0, 1, 4, 4, 1234]
    >>> binary_insertion_sort([]) == []
    True
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion point of `val` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot at index `low`.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    # Names restored: the mangled original bound both values to
    # `_UpperCAmelCase` and then referenced the undefined `unsorted`.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 668 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
_UpperCAmelCase : Any = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool) -> List[Any]:
    """Decorator factory: run the wrapped function eagerly or as a
    (optionally XLA-compiled) `tf.function` graph.

    Name restored from the decorator uses below
    (`@run_with_tf_optimizations(...)`); the mangled signature reused one
    parameter name, which does not parse.

    Raises:
        ValueError: if eager mode is requested together with XLA.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Build a `(batch_size, sequence_length)` int32 tensor of random token
    ids in `[0, vocab_size)`.

    Name restored from the call sites (`random_input_ids`); `tf.intaa` in
    the mangled version is `tf.int32` (digit-mangling).
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class lowerCAmelCase_(Benchmark):
    """TensorFlow benchmark runner (speed and memory, inference and train).

    Base class restored to `Benchmark` (the file's import). Method names
    below are restored to match the transformers benchmark API
    (`_inference_speed`, `_train_speed`, `_inference_memory`,
    `_train_memory`) — the mangled names were all `__snake_case`; names
    called on `self` in the bodies (`_prepare_inference_func`,
    `_measure_speed`, ...) are grounded by those call sites.
    """

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build a zero-arg forward-pass closure for the given model."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Build a zero-arg forward+backward closure for the given model."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Time *func* and return the best per-call runtime in seconds."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(func, repeat=self.args.repeat, number=10)
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        """Run *func* once and report peak memory (and an optional
        line-by-line summary when tracing is enabled)."""
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 668 |
def lowerCAmelCase_(input_string: str, pattern: str) -> bool:
    """Regex-style match of `input_string` against `pattern` supporting '.' and '*'.

    Bottom-up dynamic programming: dp[i][j] is 1 iff the length-i prefix of
    `input_string` matches the length-j prefix of `pattern`.

    NOTE(review): the obfuscated original declared every parameter as
    `lowercase__`, which is a SyntaxError (duplicate argument); the names here
    are reconstructed from the DP comments and the demo block below.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                # current characters match (or pattern has wildcard '.')
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' absorbs zero occurrences of the preceding element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' absorbs one more occurrence of the preceding element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # NOTE(review): the obfuscated original assigned both values to the same
    # name `_UpperCAmelCase` and then called an undefined `match_pattern`;
    # restored the variable names the code actually reads and routed the call
    # to the matcher defined above (obfuscated name `lowerCAmelCase_`).
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if lowerCAmelCase_(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 668 | 1 |
# CLI helper for converting a TensorFlow MobileBERT checkpoint into a PyTorch
# `MobileBertForPreTraining` state dict.
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
# emit INFO-level progress messages while loading/saving weights
logging.set_verbosity_info()
def lowerCAmelCase_(tf_checkpoint_path: str, mobilebert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow MobileBERT checkpoint to a PyTorch state dict.

    NOTE(review): the obfuscated original declared three parameters all named
    `lowercase__` (a SyntaxError); names and argument order are reconstructed
    from the argparse block below, which passes (tf_checkpoint_path,
    mobilebert_config_file, pytorch_dump_path) positionally.
    """
    # Initialise PyTorch model from the JSON config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # NOTE(review): the obfuscated original assigned the parser and the parsed
    # args to the same name `_UpperCAmelCase` while the code below reads
    # `parser`/`args`, and it called an undefined
    # `convert_tf_checkpoint_to_pytorch`; restored the names the code reads
    # and routed the call to the converter defined above.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    lowerCAmelCase_(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 668 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the obfuscated original assigned all four constants to the same
# name `_UpperCAmelCase`, while the tokenizer class below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`; restored the names it reads.
logger = logging.get_logger(__name__)

# file names under which the vocabulary assets are serialized
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

# hub locations of the pretrained vocabularies
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

# maximum model input sizes per checkpoint
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class lowerCAmelCase_(snake_case__):
    """Character-level MGP-STR tokenizer (obfuscated `MgpstrTokenizer`).

    NOTE(review): reconstructed — the obfuscated original had duplicate
    parameter names (a SyntaxError), never assigned `self.vocab`/`self.decoder`
    although later methods read them, named every method `__snake_case`
    (so only the last survived, name-mangled), and `_tokenize` extended the
    whole text once per character. Method/attribute names follow the
    `PreTrainedTokenizer` API that the class implements.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        # token -> id mapping loaded from the JSON vocabulary file
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # id -> token reverse mapping
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split `text` into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary JSON into `save_directory`; return the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            # sort_keys/ensure_ascii reconstructed per the upstream tokenizer
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 668 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# module logger (presumably bound to `logger` in the un-obfuscated source — TODO confirm)
_UpperCAmelCase : Any = logging.get_logger(__name__)
# hub location of the pretrained Swinv2 config JSON; nothing in this chunk
# reads `_UpperCAmelCase`, so the original constant name cannot be confirmed here
_UpperCAmelCase : Any = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class lowerCAmelCase_(snake_case__):
    """Swin Transformer v2 model configuration (obfuscated `Swinv2Config`).

    NOTE(review): reconstructed — the obfuscated original declared every
    `__init__` parameter as `SCREAMING_SNAKE_CASE_` (a SyntaxError) and
    assigned throwaway locals instead of instance attributes, so construction
    stored nothing. Parameter names are taken from the assignment targets the
    body implies and the `attribute_map` entries; attribute names mirror them.
    """

    model_type = "swinv2"

    # map generic config attribute names onto Swinv2-specific ones
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        # NOTE(review): attribute name presumed `pretrained_window_sizes` — confirm upstream
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 668 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DistilBERT sub-package: framework-specific
# modules are only imported on first attribute access via `_LazyModule`.
# NOTE(review): under the obfuscation every module-level name below is
# `_UpperCAmelCase`; in the un-obfuscated original the first dict is
# `_import_structure` (that name is passed to `_LazyModule` at the bottom) and
# each later list was added to it under the matching module key — confirm
# against the upstream file.
_UpperCAmelCase : List[Any] = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
# fast tokenizer is only available when the `tokenizers` package is installed
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Tuple = ["DistilBertTokenizerFast"]
# PyTorch model classes, gated on torch being installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : List[Any] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
# TensorFlow model classes, gated on tensorflow being installed
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : List[Any] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
# Flax model classes, gated on flax being installed
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCAmelCase : Union[str, Any] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
# During static type checking import everything eagerly so checkers see real
# symbols; at runtime the `else` branch installs the lazy module instead.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): `_import_structure` is not defined under this name in the
    # obfuscated file (see note at top) — this line resolves only upstream.
    _UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 1 |
# Universal gas constant R, as read by both functions below. Unit - J mol-1 K-1
# NOTE(review): the obfuscated original bound this value to `_UpperCAmelCase`
# while the bodies read `UNIVERSAL_GAS_CONSTANT`; restored the name read.
UNIVERSAL_GAS_CONSTANT = 8.314462


def lowerCAmelCase_(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas: P = nRT / V.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


# NOTE(review): this second definition shadows the one above because the
# obfuscation gave both functions the same name; only this volume variant is
# reachable at runtime under `lowerCAmelCase_`.
def lowerCAmelCase_(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas: V = nRT / P.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 668 |
from collections import deque
class lowerCAmelCase_:
    """A schedulable process record (obfuscated `Process`).

    NOTE(review): the obfuscated original assigned throwaway locals instead of
    instance attributes; attribute names restored from how the MLFQ class
    below reads them (process_name, arrival_time, stop_time, burst_time,
    waiting_time, turnaround_time).
    """

    def __init__(self, process_name: str, arrival_time: int, burst_time: int):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class lowerCAmelCase_:
    """Multi-Level Feedback Queue scheduler (obfuscated `MLFQ`).

    The first `number_of_queues - 1` levels run round-robin with the matching
    entry of `time_slices`; the final level runs first-come-first-served.
    Queued items must expose the attributes set by the Process class above.

    NOTE(review): reconstructed — the obfuscated original declared duplicate
    `__init__` parameters (a SyntaxError), stored locals instead of instance
    attributes, and named every method `__snake_case` (so only the last
    survived, name-mangled). Method names are restored from the in-file call
    sites (`round_robin`, `first_come_first_served`, `update_waiting_time`,
    `multi_level_feedback_queue`, `calculate_*`).
    """

    def __init__(self, number_of_queues: int, time_slices: list, queue: deque, current_time: int):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> list:
        """Return process names in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list) -> list:
        """Return the waiting time of every process in `queue`."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list) -> list:
        """Return the turnaround time of every process in `queue`."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list) -> list:
        """Return the completion (stop) time of every process in `queue`."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque) -> list:
        # NOTE(review): name presumed from upstream; nothing in this chunk calls it.
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process) -> int:
        """Accumulate the time `process` spent waiting since it last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque) -> deque:
        """Run every queued process to completion in queue order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque, time_slice: int):
        """Give each queued process one `time_slice`; return (finished, still-ready)."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque:
        """Drive all levels and return the finish queue."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
# Demo / doctest driver for the MLFQ scheduler.
# NOTE(review): this block is unrunnable as obfuscated — `Process` and `MLFQ`
# are the two classes above (both renamed `lowerCAmelCase_`, so the Process
# class is shadowed), every assignment below targets the same name
# `_UpperCAmelCase`, and `Pa`/`time_slices`/`number_of_queues`/`queue`/`mlfq`
# are never bound. In the un-obfuscated original these were P1..P4 etc.
if __name__ == "__main__":
    import doctest

    _UpperCAmelCase : List[Any] = Process("P1", 0, 53)
    _UpperCAmelCase : Tuple = Process("P2", 0, 17)
    _UpperCAmelCase : int = Process("P3", 0, 68)
    _UpperCAmelCase : str = Process("P4", 0, 24)
    _UpperCAmelCase : Tuple = 3
    _UpperCAmelCase : List[Any] = [17, 25]
    _UpperCAmelCase : Tuple = deque([Pa, Pa, Pa, Pa])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
    _UpperCAmelCase : Tuple = Process("P1", 0, 53)
    _UpperCAmelCase : List[str] = Process("P2", 0, 17)
    _UpperCAmelCase : Any = Process("P3", 0, 68)
    _UpperCAmelCase : List[Any] = Process("P4", 0, 24)
    _UpperCAmelCase : Optional[int] = 3
    _UpperCAmelCase : int = [17, 25]
    _UpperCAmelCase : str = deque([Pa, Pa, Pa, Pa])
    _UpperCAmelCase : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
    _UpperCAmelCase : int = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F'''waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F'''completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F'''turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print sequence of finished processes
    print(
        F'''sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}'''
    )
| 668 | 1 |
from datetime import datetime
import requests
def lowerCAmelCase_(url: str) -> bytes:
    """Download an Instagram Video/IGTV post via downloadgram and return the raw MP4 bytes.

    NOTE(review): the obfuscated original named the parameter `lowercase__`
    while the body read `url` and `base_url`; restored the names the body reads.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # the API returns a JSON list whose first entry carries the direct video URL
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    # NOTE(review): the obfuscated original assigned both values to the same
    # name `_UpperCAmelCase` and called an undefined `download_video`;
    # restored the names the code reads and routed the call to the downloader
    # defined above (obfuscated name `lowerCAmelCase_`).
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(lowerCAmelCase_(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 668 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Tuple = "true"
def lowerCAmelCase_(accelerator, num_samples=82, batch_size=16):
    """Build a regression model, its DDP copy, and a prepared dataloader.

    NOTE(review): the obfuscated original declared three parameters all named
    `lowercase__` (a SyntaxError); names reconstructed from the body
    (`accelerator.device`, `RegressionDataset(length=...)`,
    `DataLoader(..., batch_size=...)`).
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def lowerCAmelCase_(accelerator, use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader (batch size 16).

    NOTE(review): the obfuscated original declared duplicate `lowercase__`
    parameters (a SyntaxError) and reused that name for several literal
    arguments; literals reconstructed from the upstream accelerate test script.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        # tokenize sentence pairs; let the collate_fn handle padding
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def lowerCAmelCase_(dispatch_batches, split_batches):
    """Build the MRPC model/dataloader pair in 'ddp' and 'no' (single-process) variants.

    NOTE(review): duplicate `lowercase__` parameters (a SyntaxError) in the
    obfuscated original; names reconstructed from the Accelerator kwargs the
    body passes. `get_dataloader` is the loader-builder defined just above
    (obfuscated name `lowerCAmelCase_` in this file).
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase_(model, dataloader, accelerator):
    """Run `model` over `dataloader`, gathering logits/targets across processes.

    NOTE(review): the obfuscated original declared three parameters all named
    `lowercase__` (a SyntaxError); names reconstructed from the body and the
    call site in the length-check test below.
    """
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def lowerCAmelCase_(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Assert that gather_for_metrics yields exactly `num_samples` predictions.

    NOTE(review): duplicate `lowercase__` parameters (a SyntaxError) in the
    obfuscated original; names/defaults reconstructed from the assert message
    and the driver below (`test_torch_metrics(accelerator, 99)` etc.).
    `get_basic_setup`/`generate_predictions` are the helpers defined above
    (both obfuscated to `lowerCAmelCase_` in this file).
    """
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}'
def lowerCAmelCase_(dispatch_batches=False, split_batches=False):
    """Check gather_for_metrics against single-process MRPC evaluation.

    NOTE(review): duplicate `lowercase__` parameters (a SyntaxError) in the
    obfuscated original; local names reconstructed from the dict keys and the
    metric calls the body makes. `get_mrpc_setup` is the builder defined above
    (obfuscated to `lowerCAmelCase_` in this file).
    """
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    """Drive all gather_for_metrics checks across batching configurations.

    NOTE(review): renamed from the obfuscated `lowerCAmelCase_` — the spawn
    entry points at the bottom of this script call `main()`, and the
    obfuscated name is immediately shadowed by the next `def` anyway. The
    `lowercase__` placeholders the obfuscation left in the Accelerator and
    test calls are replaced by the values implied by the surrounding loops.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> None:
    """Spawn entry point (accelerate/TPU `_mp_fn` style): ignores the worker index and runs `main`."""
    # NOTE(review): `main` is the test driver defined just above in the
    # un-obfuscated original — confirm it is bound under that name here.
    main()
if __name__ == "__main__":
    main()
| 668 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCAmelCase_(snake_case__):
    """Text-to-speech agent tool backed by SpeechT5 (obfuscated `TextToSpeechTool`).

    NOTE(review): reconstructed — the obfuscated original bound every class
    attribute to the same name `UpperCamelCase_`, named every method
    `__snake_case` (so only the last survived, name-mangled), and dropped a
    throwaway local where `self.post_processor` should be assigned. Attribute
    and method names follow the `PipelineTool` API this class implements.
    """

    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        """Default the vocoder checkpoint before the generic tool setup."""
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize `text`; fall back to a stock x-vector when no speaker embedding is given."""
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7_305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        """Generate the raw spectrogram/waveform without tracking gradients."""
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        """Vocode the model output to a CPU waveform tensor."""
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 668 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the obfuscated original assigned every constant below to the
# same name `_UpperCAmelCase`, while the tokenizer class reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `CONTROL_CODES`;
# restored the names it reads.
logger = logging.get_logger(__name__)

# file names under which the vocabulary assets are serialized
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# hub locations of the pretrained CTRL vocabulary/merges files
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

# maximum model input sizes per checkpoint
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL conditioning control codes mapped to their token ids
CONTROL_CODES = {
    "Pregnancy": 168_629,
    "Christianity": 7_675,
    "Explain": 106_423,
    "Fitness": 63_440,
    "Saving": 63_163,
    "Ask": 27_171,
    "Ass": 95_985,
    "Joke": 163_509,
    "Questions": 45_622,
    "Thoughts": 49_605,
    "Retail": 52_342,
    "Feminism": 164_338,
    "Writing": 11_992,
    "Atheism": 192_263,
    "Netflix": 48_616,
    "Computing": 39_639,
    "Opinion": 43_213,
    "Alone": 44_967,
    "Funny": 58_917,
    "Gaming": 40_358,
    "Human": 4_088,
    "India": 1_331,
    "Joker": 77_138,
    "Diet": 36_206,
    "Legal": 11_859,
    "Norman": 4_939,
    "Tip": 72_689,
    "Weight": 52_343,
    "Movies": 46_273,
    "Running": 23_425,
    "Science": 2_090,
    "Horror": 37_793,
    "Confession": 60_572,
    "Finance": 12_250,
    "Politics": 16_360,
    "Scary": 191_985,
    "Support": 12_654,
    "Technologies": 32_516,
    "Teenage": 66_160,
    "Event": 32_769,
    "Learned": 67_460,
    "Notion": 182_770,
    "Wikipedia": 37_583,
    "Books": 6_665,
    "Extract": 76_050,
    "Confessions": 102_701,
    "Conspiracy": 75_932,
    "Links": 63_674,
    "Narcissus": 150_425,
    "Relationship": 54_766,
    "Relationships": 134_796,
    "Reviews": 41_671,
    "News": 4_256,
    "Translation": 26_820,
    "multilingual": 128_406,
}
def lowerCAmelCase_(word):
    """Return the set of adjacent symbol pairs in `word` (any sequence of symbols).

    NOTE(review): the obfuscated original named the parameter `lowercase__`
    while the body read `word`; restored the name the body reads. The dead
    re-wrap `pairs = set(pairs)` from the original was dropped.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCAmelCase_ ( snake_case__ ):
    """CTRL-style byte-pair-encoding tokenizer.

    Loads a JSON vocabulary and a BPE merges file; sub-word continuation is
    marked with a trailing ``@@`` (the last sub-word of a word carries no
    marker).
    """

    # NOTE(review): the original bound all four class attributes to a single
    # reused name, so only the last survived; the canonical slow-tokenizer
    # attribute names are restored here — confirm against the base class.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """Read ``vocab_file`` (JSON) and ``merges_file`` (one merge per line).

        Fix: the original declared duplicate parameter names (a SyntaxError)
        and stored state in throwaway locals instead of instance attributes.
        """
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a header, trailing empty line is dropped.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.encoder)

    def get_vocab(self):
        """Full vocabulary, including tokens added after loading."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single word; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so end-of-word merges rank differently.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        # Join sub-words with the @@ continuation marker and strip the
        # trailing "</w>" marker (4 characters).
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` on whitespace and BPE-encode each word."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to unk."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE: join tokens and remove the ``@@ `` continuation marker."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab JSON and merges file into ``save_directory``.

        Fix: the original used duplicate parameter names and referenced
        undefined locals (`kv`, `index`, `merge_file`, `save_directory`).
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 668 | 1 |
from collections import namedtuple

# Each unit maps to (from_, to): multiplying by ``from_`` converts the unit
# into cubic metres; multiplying by ``to`` converts cubic metres to the unit.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def lowerCAmelCase_ (value, from_type, to_type) -> float:
    """Convert a volume ``value`` from unit ``from_type`` to unit ``to_type``.

    Raises:
        ValueError: if either unit name is not a key of ``METRIC_CONVERSION``.
    """
    # Fix: the original declared three parameters with the same name (a
    # SyntaxError), bound both globals to one reused name so the reads of
    # ``from_to``/``METRIC_CONVERSION`` failed, and joined the *invalid input
    # string* character-by-character instead of listing the supported units.
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + ", ".join(METRIC_CONVERSION)
        )
    # Convert to cubic metres first, then from cubic metres to the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_ :
    """Abstract streamer interface used by the text-generation loop."""
    # NOTE(review): both methods below are defined under the same (mangled)
    # name, so the second definition shadows the first — presumably they were
    # distinct put/end hooks originally; confirm against the subclasses below.
    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : int ):
        # Receives a new value pushed by the generation loop.
        raise NotImplementedError()
    def __snake_case ( self : Union[str, Any] ):
        # Signals that generation has finished.
        raise NotImplementedError()
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer that decodes generated tokens and prints text as it arrives.

    Fixes applied: the constructor stored its state in throwaway locals even
    though every method reads ``self.tokenizer``/``self.skip_prompt``/etc.;
    the methods called ``self.on_finalized_text`` and ``self._is_chinese_char``
    but all methods were defined under one shadowing name; keyword arguments
    echoed the method parameter instead of the intended literal values.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receive a batch of token ids, decode, and emit the printable part."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining cached text and mark the stream finished."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print the new text; end the line only when the stream finishes."""
        print(text, flush=True, end='''''' if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if ``cp`` is a codepoint in the CJK Unicode block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4e00 and cp <= 0x9fff)
            or (cp >= 0x3400 and cp <= 0x4dbf)  #
            or (cp >= 0x2_0000 and cp <= 0x2_a6df)  #
            or (cp >= 0x2_a700 and cp <= 0x2_b73f)  #
            or (cp >= 0x2_b740 and cp <= 0x2_b81f)  #
            or (cp >= 0x2_b820 and cp <= 0x2_ceaf)  #
            or (cp >= 0xf900 and cp <= 0xfaff)
            or (cp >= 0x2_f800 and cp <= 0x2_fa1f)  #
        ):  #
            return True
        return False
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer that queues decoded text so another thread can iterate it.

    Fixes applied: the constructor dropped its state into throwaway locals
    although the methods read ``self.text_queue``/``self.stop_signal``/
    ``self.timeout``; the iterator protocol requires an ``__next__`` method.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None  # sentinel pushed when the stream ends
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Push new text on the queue; enqueue the stop sentinel at the end."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        # Blocks up to ``self.timeout`` seconds for the next chunk of text.
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        return value
| 668 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCAmelCase_ :
    """Graph vertex for Prim's algorithm: id, key, predecessor, adjacency.

    Fix: the original constructor and ``add_edge`` bound their values to a
    throwaway local instead of instance attributes, although ``__lt__``,
    ``__repr__`` and the MST functions below read ``self.key``, ``self.id``,
    ``self.neighbors`` and ``self.edges``; the mutator names are restored to
    the ``add_neighbor``/``add_edge`` spelling used by the callers below.
    """

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None   # current best edge weight connecting to the tree
        self.pi = None    # predecessor in the minimum spanning tree
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Record ``vertex`` as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge towards ``vertex``."""
        self.edges[vertex.id] = weight
def lowerCAmelCase_ (graph, a, b, edge) -> None:
    """Connect vertices ``a`` and ``b`` (1-indexed) with weight ``edge``.

    Fix: the original declared four parameters with the same name (a
    SyntaxError) while the body read ``a``, ``b`` and the weight.
    """
    # Vertices are numbered from 1 in the input, stored 0-indexed in ``graph``.
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def lowerCAmelCase_ (graph, root) -> list:
    """Prim's MST (list-based): return ``[(child_id, parent_id)]``, 1-indexed.

    Fix: the original declared duplicate parameter names (a SyntaxError) and
    never bound ``a``/``q`` nor wrote the ``key``/``pi`` attributes the loop
    below reads.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0  # start growing the tree from the root
    q = graph[:]
    while q:
        # Take the cheapest frontier vertex (Vertex.__lt__ compares keys).
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def lowerCAmelCase_ (graph, root):
    """Prim's MST (heap-based): yield ``(child_id, parent_id)``, 1-indexed.

    Fix: same obfuscation damage as the list-based variant — duplicate
    parameter names and lost ``key``/``pi``/heap bindings.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0  # start growing the tree from the root
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
            # Keys may have changed, so restore the heap invariant.
            hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def lowerCAmelCase_ () -> None:
    '''simple docstring'''
    # NOTE(review): intentionally empty placeholder — nothing in this chunk
    # calls it; the doctest runner below exercises the module instead.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 668 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the MRA model package.
# Fixes: the import structure was bound to throwaway names although
# ``_import_structure`` is read below; the torch-only model list was never
# stored under its ``"modeling_mra"`` key; and the lazy module instance was
# discarded instead of being installed into ``sys.modules``.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: also expose the modeling objects through the lazy module.
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 668 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for the 4-row, 3-column parquet fixture dataset.

    Fix: the original had duplicate parameter names (a SyntaxError) and a
    name that did not match the ``_check_parquet_dataset`` call sites below.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading with/without keep_in_memory yields the expected dataset."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit ``features`` mapping overrides the inferred schema."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split name is propagated (defaulting to "train")."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # Fix: without parentheses the conditional expression made the
    # default-split case assert the truthy string "train" (always passing).
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """Both a single path and a list of paths are accepted."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict built from the parquet fixture.

    Fix: duplicate parameter names (a SyntaxError) and a name mismatching the
    ``_check_parquet_datasetdict`` call sites below.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a DatasetDict honors keep_in_memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """An explicit ``features`` mapping overrides the inferred schema per split."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Split names map to the right files and are preserved on read."""
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a Dataset to parquet round-trips the underlying Arrow table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features survive a parquet write/read round trip."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    # Also check the streaming (IterableDataset) code path.
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Row-group size depends on the presence of image/audio features."""
    assert get_writer_batch_size(feature) == expected
| 668 |
from __future__ import annotations
def lowerCAmelCase_ (value, weight, capacity):
    """Fractional knapsack: maximize total value within ``capacity``.

    Returns:
        (max_value, fractions) where ``fractions[i]`` is the fraction of item
        ``i`` taken (0, 1, or a partial amount for the last item that fits).

    Fixes: the original declared three parameters with the same name (a
    SyntaxError), the sort lambda read an undefined ``i``, ``reverse`` was
    given the parameter instead of ``True``, and ``ratio``/``fractions``/
    ``max_value`` were clobbered into one reused local.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy: consider items in decreasing value-per-weight order.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take the fractional remainder of the first item that no longer fits.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
# Section schema shared by every test below; the commented-out fixture above
# shows its intended name.  Fix: it was bound to a reused throwaway name while
# the test functions need ``example_yaml_structure``.
example_yaml_structure = yaml.safe_load(
    "\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n  - name: \"Dataset Card for X\"  # First-level markdown heading\n    allow_empty: false\n    allow_empty_text: true\n    subsections:\n      - name: \"Table of Contents\"\n        allow_empty: false\n        allow_empty_text: false\n        subsections: null\n      - name: \"Dataset Description\"\n        allow_empty: false\n        allow_empty_text: false\n        subsections:\n          - name: \"Dataset Summary\"\n            allow_empty: false\n            allow_empty_text: false\n            subsections: null\n          - name: \"Supported Tasks and Leaderboards\"\n            allow_empty: true\n            allow_empty_text: true\n            subsections: null\n          - name: Languages\n            allow_empty: false\n            allow_empty_text: true\n            subsections: null\n"
)
# Expected parse tree for README_CORRECT.  Fix: bound to a reused throwaway
# name while the parametrize lists below reference ``CORRECT_DICT``.
CORRECT_DICT = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}
# Well-formed README fixtures and the expected parse tree for the four-level
# variant.  Fix: all three were bound to one reused throwaway name while the
# parametrize lists below reference the canonical constant names.
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
    "name": "root",
    "text": "",
    "is_empty_text": True,
    "subsections": [
        {
            "name": "Dataset Card for My Dataset",
            "text": "",
            "is_empty_text": True,
            "subsections": [
                {"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
                {
                    "name": "Dataset Description",
                    "text": "Some text here.",
                    "is_empty_text": False,
                    "subsections": [
                        {
                            "name": "Dataset Summary",
                            "text": "Some text here.",
                            "is_empty_text": False,
                            "subsections": [
                                {
                                    "name": "Extra Ignored Subsection",
                                    "text": "",
                                    "is_empty_text": True,
                                    "subsections": [],
                                }
                            ],
                        },
                        {
                            "name": "Supported Tasks and Leaderboards",
                            "text": "",
                            "is_empty_text": True,
                            "subsections": [],
                        },
                        {"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
                    ],
                },
            ],
        }
    ],
}
# Malformed README fixtures paired with the validation message each one
# produces.  Fix: every constant was bound to one reused throwaway name while
# the parametrize lists below reference the canonical README_*/EXPECTED_* names.
README_EMPTY_YAML = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    """Well-formed READMEs parse into the expected section tree."""
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    """Malformed READMEs raise ValueError with the documented message."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    """Structurally broken READMEs fail already while parsing the string."""
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    """With ``suppress_parsing_errors=True`` parsing a broken README must not raise."""
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    """Round-trip: write the README to disk and parse it back via ``from_readme``."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        # The root node carries the file path and has no text of its own.
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    """Validating an on-disk invalid README reports the file's real path."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    """Parsing an on-disk structurally broken README raises immediately."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    """Suppressing parsing errors also works for the on-disk entry point."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 668 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for a ``Dataset`` loaded from the 4-row test parquet file.

    Name restored to match the call sites below (the obfuscated def name shadowed
    every other function in this file).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """``keep_in_memory=True`` must load the Arrow table into RAM."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit ``features`` mapping overrides the dtypes inferred from parquet."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split is propagated to the dataset; the default is 'train'."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # Both ternary branches must actually compare (the original `else "train"`
    # branch was a bare truthy string and could never fail).
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader accepts either a single path or a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a ``DatasetDict`` loaded from the test parquet file.

    Name restored to match the call sites below.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """DatasetDict variant of the keep_in_memory check."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """DatasetDict variant of the explicit-features override check."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """A dict of paths yields one dataset per split, each tagged with its split name."""
    if split:
        path = {split: parquet_path}
    else:
        # No split requested: load both default splits.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a dataset to parquet yields a non-empty file holding the same table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features survive a parquet round-trip, for map-style and streaming reads."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Binary media features get a dedicated (smaller) parquet row-group size."""
    assert get_writer_batch_size(feature) == expected
| 668 | 1 |
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k) via the multiplicative formula, in exact integer arithmetic.

    Name restored to match the call sites below. Callers here pass 0 <= k <= n;
    for k > n the empty loop below returns 1 rather than 0.
    """
    result = 1  # accumulates the running product
    # C(n, k) == C(n, n - k): iterate over the smaller of the two.
    if k > (n - k):
        k = n - k
    # result = n * (n-1) * ... * (n-k+1) / k!, interleaving multiply and divide
    # so every intermediate floor-division is exact.
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Return the n-th Catalan number, C(2n, n) // (n + 1).

    This counts, among other things, the number of binary search trees on
    ``node_count`` distinct keys. Name and parameter restored to match the
    references in the sibling functions and the ``__main__`` block.
    """
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    """Return n! for n >= 0.

    Raises:
        ValueError: if ``n`` is negative.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Return the number of distinct labeled binary trees on ``node_count`` nodes:
    the Catalan number (shapes) times the factorial (labelings).
    """
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    # Read the node count from stdin; an empty line falls back to 0 so the
    # validation below yields a clear error instead of int() raising.
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 668 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger.
_UpperCAmelCase : Dict = logging.get_logger(__name__)
# Name of the single vocabulary file this tokenizer expects.
_UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
# Map from pretrained model id to the URL of its SentencePiece model file.
_UpperCAmelCase : List[Any] = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
# Maximum input length (in tokens) per pretrained model id.
_UpperCAmelCase : Union[str, Any] = {
    "camembert-base": 512,
}
# SentencePiece's meta-symbol marking a word boundary.
_UpperCAmelCase : Dict = "▁"
class lowerCAmelCase_ ( snake_case__ ):
    """
    CamemBERT-style tokenizer backed by a SentencePiece BPE model, with fairseq's
    four reserved token ids mapped in front of the sentencepiece vocabulary.

    NOTE(review): attribute, parameter and method names were reconstructed from
    the method bodies (the obfuscated source reused a single name for every
    assignment, which is a SyntaxError for parameters and dropped every
    ``self.*`` attribute) -- confirm against the upstream tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file: str,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        # <mask> lives past the end of the sentencepiece vocabulary.
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add <s> ... </s> around one sequence, with </s></s> between a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """All-zero token type ids (this model does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        # Reserved fairseq ids sit in front of the sentencepiece vocabulary.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, honouring the reserved fairseq ids."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), honouring the reserved fairseq ids."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Decode sub-tokens back into a string; special tokens are kept verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Original file is gone (e.g. loaded from serialized proto): re-serialize.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 668 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
# Module-level logger shared by the helpers and dataset classes in this file.
_UpperCAmelCase : List[str] = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification.

    Name and field names reconstructed from the usages below
    (``List[InputExample]``, ``example.guid``, ``example.words``,
    ``example.labels``); the obfuscated source gave all three fields one name.
    """

    # Unique id for the example.
    guid: str
    # The words of the sequence.
    words: List[str]
    # The label for each word; None for unlabeled (test) data.
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of numerical features, as fed to the model.

    Field names reconstructed from the keyword construction in
    ``convert_examples_to_features`` below.
    """

    input_ids: List[int]
    attention_mask: List[int]
    # None for models whose input names do not include token_type_ids.
    token_type_ids: Optional[List[int]] = None
    # None for unlabeled data.
    label_ids: Optional[List[int]] = None
class Split(snake_case__):
    """Dataset split names used to build cache-file names (``mode.value``).

    Class name restored to match the ``Split.train`` default and ``Split``
    annotations below. NOTE(review): the base ``snake_case__`` is declared
    elsewhere and is presumably ``enum.Enum`` -- confirm.
    """

    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    """Task interface: reads token-classification examples and converts them to
    padded model features. Subclasses implement the two reader hooks.

    Name restored to match the ``TokenClassificationTask`` annotations below.
    """

    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Read the examples of one split from ``data_dir``."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        """Return the list of labels used by the task."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Tokenize each example, align labels to sub-tokens and pad to
        ``max_seq_length``.

        Only the first sub-token of each word keeps the real label id; the rest
        (and all padding/special positions) receive ``pad_token_label_id`` so
        the loss ignores them.
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                # xlnet-style: [CLS] goes last with its own segment id.
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
# Optional torch dependency: guards the torch-specific imports used by the
# dataset class below. NOTE(review): that class is defined at module level (not
# inside this guard), so importing this module without torch will still fail there.
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset
class lowerCAmelCase_ ( snake_case__ ):
    """PyTorch dataset of token-classification features, cached on disk per
    (split, tokenizer class, max_seq_length).

    NOTE(review): parameter/attribute names reconstructed from the body (the
    obfuscated source repeated one parameter name, a SyntaxError, and dropped
    the ``self.features`` assignments read by ``__len__``/``__getitem__``).
    """

    features: List[InputFeatures]
    # Label id the loss ignores (masks padding and word-internal sub-tokens).
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["xlnet"]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                    sep_token=tokenizer.sep_token,
                    # NOTE(review): the obfuscated source lost this value; the
                    # upstream example passes False here -- confirm.
                    sep_token_extra=False,
                    pad_on_left=bool(tokenizer.padding_side == "left"),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
# Optional TensorFlow dependency for the TF dataset implementation below.
if is_tf_available():
    import tensorflow as tf
class lowerCAmelCase_ :
    """TensorFlow dataset of token-classification features, exposed via
    ``tf.data.Dataset.from_generator``.

    NOTE(review): names reconstructed from the body (duplicate parameter names
    and missing ``self.features``/``self.dataset`` assignments in the
    obfuscated source).
    """

    features: List[InputFeatures]
    # Label id the loss ignores (masks padding and word-internal sub-tokens).
    pad_token_label_id: int = -100

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        examples = token_classification_task.read_examples_from_file(data_dir, mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ["xlnet"]),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            # NOTE(review): value lost in obfuscation; upstream passes False.
            sep_token_extra=False,
            pad_on_left=bool(tokenizer.padding_side == "left"),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

        def gen():
            # Yield (inputs, labels) pairs in the layout tf.data expects.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        # NOTE(review): the obfuscated source spelled these dtypes `tf.intaa`;
        # int32 inputs / int64 labels match the upstream example -- confirm.
        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                (
                    {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                    tf.TensorShape([None]),
                ),
            )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32},
                    tf.int64,
                ),
                (
                    {
                        "input_ids": tf.TensorShape([None]),
                        "attention_mask": tf.TensorShape([None]),
                        "token_type_ids": tf.TensorShape([None]),
                    },
                    tf.TensorShape([None]),
                ),
            )

    def get_dataset(self):
        """Attach the known cardinality and return the ``tf.data.Dataset``."""
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
| 668 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
# Ensure the installed `datasets` release is recent enough for this example script.
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
# Module-level logger for this script.
_UpperCAmelCase : int = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Bug fix: every field had been collapsed onto a single placeholder name (so only
    the last field survived) and the validator was not named ``__post_init__`` (so it
    never ran).  Field names are restored from how ``main`` and the validator use
    them (``data_args.dataset_name``, ``data_args.train_file``, ...).
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Either a hub dataset name is given, or local train/validation files
        # with matching csv/json extensions must be supplied.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Bug fix: all fields had been collapsed onto a single placeholder name; they are
    restored from how ``main`` consumes them (``model_args.model_name_or_path``,
    ``model_args.use_fast_tokenizer``, ...).  NOTE(review): the booleans'
    defaults (`use_fast_tokenizer=True`, `use_auth_token=False`) are restored
    from the standard HF example layout — confirm against the original script.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    """Fine-tune a BART classifier with the TAPEX tokenizer on the TabFact task.

    Bug fix: every local had been collapsed onto a single placeholder while later
    statements still used the original identifiers (`parser`, `training_args`,
    `raw_datasets`, `model`, ...), so nothing resolved at runtime; the locals are
    restored.  The function is named ``main`` because both ``_mp_fn`` and the
    ``__main__`` guard in this file call ``main()``.
    """
    # Parse command-line (or single-JSON-file) arguments into the three dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # Rows are newline-separated, cells '#'-separated; first row is the header.
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for TPU multiprocessing via `xla_spawn.py` (which looks up `_mp_fn`).

    Bug fix: this helper previously shadowed the training entry point's name
    while calling an undefined `main`; it is renamed to the conventional
    `_mp_fn` and delegates straight to `main()`.
    """
    # The `index` argument is the spawned-process ordinal; it is unused here.
    main()


if __name__ == "__main__":
    main()
# --- stray dataset-row separator (not Python); commented out so the file parses ---
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class lowerCAmelCase_ ( snake_case__ ):
    r"""
    Processor bundling an M-CTC-T feature extractor and a tokenizer behind one object.

    Bug fix: the two class attributes shared one name (only the second survived),
    all four public methods were named identically (only the last survived), and
    several signatures used duplicate parameter names (a SyntaxError).  Names are
    restored to the conventional ProcessorMixin layout that the body's logic
    implements (``batch_decode``/``pad``/``decode``/``as_target_processor``).
    """

    # ProcessorMixin resolves these class names to the concrete components.
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # `current_processor` is what __call__/pad delegate to while inside the
        # deprecated `as_target_processor` context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and/or `text` to the tokenizer.

        Returns the feature-extractor output, the tokenizer output, or the former
        with the token ids attached as ``labels`` when both inputs are given.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """Pad audio ``input_features`` via the feature extractor and/or ``labels`` via the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route processing through the tokenizer (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
# --- stray dataset-row separator (not Python); commented out so the file parses ---
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal (fractional) part of `number`.

    If `digit_amount` > 0, the fractional part is rounded to that many digits.

    Bug fix: both parameters shared one placeholder name (a SyntaxError) while
    the body used `number`/`digit_amount`; the name is restored to
    `decimal_isolate`, which is what this module's `__main__` block calls.

    >>> decimal_isolate(35.345, 2)
    0.34
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo: print the isolated decimal part for a range of sample inputs.
    # NOTE(review): relies on a module-level `decimal_isolate` — the helper
    # defined above must carry that exact name for this block to run.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
# --- stray dataset-row separator (not Python); commented out so the file parses ---
import cmath

import mpmath  # for roots of unity
import numpy as np
class lowerCAmelCase_ :
    """
    Fast polynomial multiplication via the discrete Fourier transform.

    The two coefficient lists are zero-padded to a common power-of-two length,
    transformed, multiplied point-wise, and inverse-transformed; the result is
    available as ``self.product``.

    Bug fixes: the constructor's parameters shared one placeholder name (a
    SyntaxError); ``np.loga`` does not exist (``np.log2`` intended); the DFT's
    corner case tested ``len(which)`` instead of the column count (so it always
    returned immediately); the locals ``dft``/``root``/``current_root`` etc.
    were never bound; both private methods shared one name; and ``__str__``
    unpacked ``enumerate`` in the wrong order, printing indices as
    coefficients.  The mpmath root of unity is replaced by the equivalent
    stdlib ``cmath.exp(2j*pi/n)``.

    >>> lowerCAmelCase_([1, 2, 3], [1, 2]).product
    [(1+0j), (4+0j), (7+0j), (6+0j)]
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list; copied so the callers' lists are never mutated.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients (highest-order zeros).
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with 0 so both lengths equal the next power of 2 that can hold the product.
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A primitive c_max_length-th complex root of unity used for the Fourier
        # transform (exp(2*pi*i/n), same value mpmath.root(1, n, 1) produced).
        self.root = cmath.exp(2j * cmath.pi / self.c_max_length)

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative radix-2 DFT of polynomial 'A' or 'B'; returns the value list."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Multiply the DFTs point-wise and inverse-transform back to coefficients."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case: single-point product needs no inverse transform.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack: round away floating noise (8 decimals) and flatten to a coefficient list.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable view of the two inputs and their product."""
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
# --- stray dataset-row separator (not Python); commented out so the file parses ---
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase_ :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=99 , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2] , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : int=37 , SCREAMING_SNAKE_CASE_ : str="gelu_new" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=False , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , ):
lowerCAmelCase__ = TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , ):
lowerCAmelCase__ = TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , ):
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , ):
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCamelCase_ :Tuple = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ :Optional[int] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ :Dict = False
UpperCamelCase_ :Tuple = False
def __snake_case ( self : int ):
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : str ):
self.config_tester.run_common_tests()
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@require_tf
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
UpperCamelCase_ :str = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase_ :Optional[Any] = False
UpperCamelCase_ :Any = False
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Any ):
self.config_tester.run_common_tests()
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
| 668 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase_ :
@staticmethod
def __snake_case ( *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Tuple ):
pass
def lowerCAmelCase_ (lowercase__ : Image ) -> str:
'''simple docstring'''
lowerCAmelCase__ = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def lowerCAmelCase_ (lowercase__ : Image ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = np.array(lowercase__ )
lowerCAmelCase__ = npimg.shape
return {"hash": hashimage(lowercase__ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCamelCase_ :Union[str, Any] = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase_ :str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
lowerCAmelCase__ = MaskGenerationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __snake_case ( self : List[Any] ):
pass
@slow
@require_torch
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
lowerCAmelCase__ = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
lowerCAmelCase__ = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __snake_case ( self : List[str] ):
lowerCAmelCase__ = '''facebook/sam-vit-huge'''
lowerCAmelCase__ = pipeline('''mask-generation''' , model=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
lowerCAmelCase__ = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(SCREAMING_SNAKE_CASE_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 668 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
_UpperCAmelCase : int = Mapping[str, np.ndarray]
_UpperCAmelCase : Optional[Any] = Mapping[str, Any] # Is a nested dict.
_UpperCAmelCase : Optional[Any] = 0.01
@dataclasses.dataclass(frozen=snake_case__ )
class lowerCAmelCase_ :
UpperCamelCase_ :np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCamelCase_ :np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCamelCase_ :np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCamelCase_ :np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCamelCase_ :np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCamelCase_ :Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCamelCase_ :Optional[str] = None
# Templates used to generate this protein (prediction-only)
UpperCamelCase_ :Optional[Sequence[str]] = None
# Chain corresponding to each parent
UpperCamelCase_ :Optional[Sequence[int]] = None
def lowerCAmelCase_ (lowercase__ : str ) -> Protein:
'''simple docstring'''
lowerCAmelCase__ = r'''(\[[A-Z]+\]\n)'''
lowerCAmelCase__ = [tag.strip() for tag in re.split(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0]
lowerCAmelCase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
lowerCAmelCase__ = ["N", "CA", "C"]
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowerCAmelCase__ = g[1][0].strip()
for i in range(len(lowercase__ ) ):
if seq[i] not in residue_constants.restypes:
lowerCAmelCase__ = '''X''' # FIXME: strings are immutable
lowerCAmelCase__ = np.array(
[residue_constants.restype_order.get(lowercase__ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowerCAmelCase__ = []
for axis in range(3 ):
tertiary.append(list(map(lowercase__ , g[1][axis].split() ) ) )
lowerCAmelCase__ = np.array(lowercase__ )
lowerCAmelCase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowercase__ ):
lowerCAmelCase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowerCAmelCase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
lowerCAmelCase__ = np.zeros(
(
len(lowercase__ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowercase__ ):
lowerCAmelCase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowercase__ , atom_mask=lowercase__ , aatype=lowercase__ , residue_index=np.arange(len(lowercase__ ) ) , b_factors=lowercase__ , )
def lowerCAmelCase_ (lowercase__ : Protein , lowercase__ : int = 0 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = prot.remark
if remark is not None:
pdb_headers.append(f'REMARK {remark}' )
lowerCAmelCase__ = prot.parents
lowerCAmelCase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowerCAmelCase__ = [p for i, p in zip(lowercase__ , lowercase__ ) if i == chain_id]
if parents is None or len(lowercase__ ) == 0:
lowerCAmelCase__ = ['''N/A''']
pdb_headers.append(f'PARENT {" ".join(lowercase__ )}' )
return pdb_headers
def lowerCAmelCase_ (lowercase__ : Protein , lowercase__ : str ) -> str:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = pdb_str.split('''\n''' )
lowerCAmelCase__ = prot.remark
if remark is not None:
out_pdb_lines.append(f'REMARK {remark}' )
lowerCAmelCase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowerCAmelCase__ = []
if prot.parents_chain_index is not None:
lowerCAmelCase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowercase__ ) , [] )
parent_dict[str(lowercase__ )].append(lowercase__ )
lowerCAmelCase__ = max([int(lowercase__ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowerCAmelCase__ = parent_dict.get(str(lowercase__ ) , ['''N/A'''] )
parents_per_chain.append(lowercase__ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowerCAmelCase__ = [['''N/A''']]
def make_parent_line(lowercase__ : Sequence[str] ) -> str:
return f'PARENT {" ".join(lowercase__ )}'
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowerCAmelCase__ = 0
for i, l in enumerate(lowercase__ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowercase__ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowercase__ ):
lowerCAmelCase__ = parents_per_chain[chain_counter]
else:
lowerCAmelCase__ = ['''N/A''']
out_pdb_lines.append(make_parent_line(lowercase__ ) )
return "\n".join(lowercase__ )
def lowerCAmelCase_ (lowercase__ : Protein ) -> str:
'''simple docstring'''
lowerCAmelCase__ = residue_constants.restypes + ['''X''']
def res_atoa(lowercase__ : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
lowerCAmelCase__ = residue_constants.atom_types
lowerCAmelCase__ = []
lowerCAmelCase__ = prot.atom_mask
lowerCAmelCase__ = prot.aatype
lowerCAmelCase__ = prot.atom_positions
lowerCAmelCase__ = prot.residue_index.astype(np.intaa )
lowerCAmelCase__ = prot.b_factors
lowerCAmelCase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
lowerCAmelCase__ = get_pdb_headers(lowercase__ )
if len(lowercase__ ) > 0:
pdb_lines.extend(lowercase__ )
lowerCAmelCase__ = aatype.shape[0]
lowerCAmelCase__ = 1
lowerCAmelCase__ = 0
lowerCAmelCase__ = string.ascii_uppercase
lowerCAmelCase__ = None
# Add all atom sites.
for i in range(lowercase__ ):
lowerCAmelCase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowercase__ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowerCAmelCase__ = '''ATOM'''
lowerCAmelCase__ = atom_name if len(lowercase__ ) == 4 else f' {atom_name}'
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = 1.00
lowerCAmelCase__ = atom_name[0] # Protein supports only C, N, O, S, this works.
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = '''A'''
if chain_index is not None:
lowerCAmelCase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowerCAmelCase__ = (
f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
f'{res_name_a:>3} {chain_tag:>1}'
f'{residue_index[i]:>4}{insertion_code:>1} '
f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
f'{occupancy:>6.2f}{b_factor:>6.2f} '
f'{element:>2}{charge:>2}'
)
pdb_lines.append(lowercase__ )
atom_index += 1
lowerCAmelCase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowerCAmelCase__ = True
lowerCAmelCase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowerCAmelCase__ = '''TER'''
lowerCAmelCase__ = (
f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
)
pdb_lines.append(lowercase__ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowercase__ , lowercase__ ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(lowercase__ )
def lowerCAmelCase_ (lowercase__ : Protein ) -> np.ndarray:
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCAmelCase_ (lowercase__ : FeatureDict , lowercase__ : ModelOutput , lowercase__ : Optional[np.ndarray] = None , lowercase__ : Optional[np.ndarray] = None , lowercase__ : Optional[str] = None , lowercase__ : Optional[Sequence[str]] = None , lowercase__ : Optional[Sequence[int]] = None , ) -> Protein:
'''simple docstring'''
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=lowercase__ , remark=lowercase__ , parents=lowercase__ , parents_chain_index=lowercase__ , )
| 668 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase : Optional[int] = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :Optional[Any] = 'ernie_m'
UpperCamelCase_ :Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int = 250_002 , SCREAMING_SNAKE_CASE_ : int = 768 , SCREAMING_SNAKE_CASE_ : int = 12 , SCREAMING_SNAKE_CASE_ : int = 12 , SCREAMING_SNAKE_CASE_ : int = 3_072 , SCREAMING_SNAKE_CASE_ : str = "gelu" , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : float = 0.1 , SCREAMING_SNAKE_CASE_ : int = 514 , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : float = 1e-05 , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0 , **SCREAMING_SNAKE_CASE_ : Any , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = classifier_dropout
lowerCAmelCase__ = is_decoder
lowerCAmelCase__ = act_dropout
| 668 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCAmelCase : Optional[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCAmelCase_ (lowercase__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase__ )
def lowerCAmelCase_ (lowercase__ : Any ) -> Optional[int]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase__ = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(lowercase__ , id=lowercase__ )
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : int ) -> int:
'''simple docstring'''
if exitstatus == 5:
lowerCAmelCase__ = 0
# Doctest custom flag to ignore output.
_UpperCAmelCase : Any = doctest.register_optionflag("IGNORE_RESULT")
_UpperCAmelCase : Dict = doctest.OutputChecker
class lowerCAmelCase_ ( snake_case__ ):
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase : Union[str, Any] = CustomOutputChecker
_UpperCAmelCase : Dict = HfDoctestModule
_UpperCAmelCase : List[str] = HfDocTestParser
| 668 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : int = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_UpperCAmelCase : Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Any , lowercase__ : int , lowercase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('''.''' ):
lowerCAmelCase__ = getattr(lowercase__ , lowercase__ )
if weight_type is not None:
lowerCAmelCase__ = getattr(lowercase__ , lowercase__ ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "running_mean":
lowerCAmelCase__ = value
elif weight_type == "running_var":
lowerCAmelCase__ = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase__ = value
elif weight_type == "inv_freq":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , )
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(lowercase__ )[0].split('''.''' )[-2]
lowerCAmelCase__ = mapped_key.replace('''*''' , lowercase__ )
if "pos_bias_u" in name:
lowerCAmelCase__ = None
elif "pos_bias_v" in name:
lowerCAmelCase__ = None
elif "weight_g" in name:
lowerCAmelCase__ = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase__ = '''weight_v'''
elif "bias" in name:
lowerCAmelCase__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = '''weight'''
elif "running_mean" in name:
lowerCAmelCase__ = '''running_mean'''
elif "inv_freq" in name:
lowerCAmelCase__ = '''inv_freq'''
elif "running_var" in name:
lowerCAmelCase__ = '''running_var'''
elif "num_batches_tracked" in name:
lowerCAmelCase__ = '''num_batches_tracked'''
else:
lowerCAmelCase__ = None
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
continue
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Any , lowercase__ : Tuple ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = full_name.split('''conv_layers.''' )[-1]
lowerCAmelCase__ = name.split('''.''' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
lowerCAmelCase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowercase__ )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq Wav2Vec2-Conformer checkpoint to the HF format.

    Args:
        checkpoint_path: Path to the fairseq checkpoint.
        pytorch_dump_folder_path: Directory where the converted model is saved.
        config_path: Optional path to an existing HF ``config.json``.
        dict_path: Optional path to the fairseq dictionary (fine-tuned models only).
        is_finetuned: Whether the checkpoint is a fine-tuned (CTC) model.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # checkpoints trained with rotary position embeddings encode it in the filename
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 668 |
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in ascending order, in place, using odd-even transposition sort.

    The algorithm alternates between comparing even-indexed adjacent pairs
    (0,1), (2,3), ... and odd-indexed pairs (1,2), (3,4), ...; after ``len(arr)``
    passes the list is guaranteed to be sorted.

    Args:
        arr: List of mutually comparable items.

    Returns:
        The same list object, sorted in ascending order.
    """
    arr_size = len(arr)
    for pass_number in range(arr_size):
        # even passes start at index 0, odd passes at index 1
        for i in range(pass_number % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    # Demo: sort a reversed range and show before/after.
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 668 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger (the previous version bound both the logger and the map to
# the same name, so the logger was immediately clobbered).
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a NAT (Neighborhood Attention Transformer) model.

    Stores the hyper-parameters of the hierarchical NAT encoder (patch embedding,
    per-stage depths/heads, neighborhood-attention kernel size) plus the backbone
    bookkeeping (``stage_names`` / ``out_features`` / ``out_indices``) used when
    NAT serves as a vision backbone.
    """

    model_type = "nat"

    # NAT's config uses ``num_heads``/``num_layers``; map the generic names onto them.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 668 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    """Builds a tiny DistilBert config plus random inputs for the model test suite.

    Each ``create_and_check_*`` method instantiates one task head on top of
    DistilBert, runs a forward pass on ``torch_device`` and asserts the output
    shapes via the parent test case.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/mask (and label tensors when enabled) plus a config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a small DistilBertConfig matching the tester's hyper-parameters."""
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # replicate the inputs once per choice: (batch, num_choices, seq_len)
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt ``prepare_config_and_inputs`` to the common-test ``inputs_dict`` shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry point for the DistilBert model family."""

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, reload on GPU, and run the traced model."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntergrationTest(unittest.TestCase):
    """Slow integration test against the published ``distilbert-base-uncased`` weights."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # reference slice computed with the original checkpoint
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 668 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    """Builds a tiny YOLOS config plus random pixel values / detection targets.

    Used by ``YolosModelTest`` below; the ``create_and_check_*`` methods run a
    forward pass on ``torch_device`` and assert the output shapes via the parent
    test case.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        """Create random pixel values (and detection targets when enabled) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Return a small YolosConfig matching the tester's hyper-parameters."""
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        """Adapt ``prepare_config_and_inputs`` to the common-test ``inputs_dict`` shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as YOLOS does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published ``hustvl/yolos-small`` weights."""

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        # reference slices computed with the original checkpoint
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
| 668 |
from typing import Any
def lowerCAmelCase_(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Compute the most likely hidden-state path with the Viterbi algorithm.

    Args:
        observations_space: ordered list of observation names (strings).
        states_space: list of hidden-state names (strings).
        initial_probabilities: state -> P(state) at time 0 (floats).
        transition_probabilities: state -> {state -> P(next | current)} (floats).
        emission_probabilities: state -> {observation -> P(obs | state)} (floats).

    Returns:
        The most probable hidden-state sequence, one state per observation.

    Raises:
        ValueError: if any argument is empty or has the wrong structure/type.
    """
    # NOTE: the obfuscated signature repeated one parameter name (a SyntaxError)
    # and the validators below had lost the names their call sites use; both
    # are restored to the names the bodies already reference.
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every Viterbi argument; raise ValueError on the first problem."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any of the five arguments is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the structure and value types of the three probability tables."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check a dict of dicts whose inner values must all be floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless ``_object`` is a str-keyed dict of ``value_type`` values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 668 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Mock model whose forward takes exactly the tokenizer-produced inputs.

    Name and parameter names restored: the test class below instantiates
    `FuncContiguousArgs`, and `ensure_valid_input` inspects `forward`'s
    signature *by parameter name* to order the ONNX inputs.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        # The return value is irrelevant to the signature-inspection tests.
        return None
class FuncNonContiguousArgs:
    """Mock model with an extra, never-supplied argument between the inputs.

    Name and parameter names restored: the test class below instantiates
    `FuncNonContiguousArgs`, and `ensure_valid_input` must stop at the
    unprovided `some_other_args` parameter, keeping only `input_ids`.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        # The return value is irrelevant to the signature-inspection tests.
        return None
class lowerCAmelCase_ ( unittest.TestCase ):
    # Tests for transformers.convert_graph_to_onnx (export, quantization,
    # shape inference, input ordering).
    # NOTE(review): this obfuscated class is non-functional as written:
    #   * it refers to itself as `OnnxExportTestCase` below, but is named
    #     `lowerCAmelCase_` — presumably the original class name; confirm.
    #   * the model list is assigned to `UpperCamelCase_` but read as
    #     `MODEL_TO_TEST`.
    #   * every method is defined under the same name `__snake_case`, so only
    #     the last definition survives on the class.
    #   * `SCREAMING_SNAKE_CASE_` is an unbound name standing in for several
    #     different original variables; which one each occurrence meant cannot
    #     be recovered safely from this view.
    # (model_name, model_kwargs) pairs to export in the tf/pt tests.
    UpperCamelCase_ :Dict = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
    ]
    # Export each listed model through the TensorFlow path (opset 12).
    @require_tf
    @slow
    def __snake_case ( self : Optional[int] ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
    # Export each listed model through the PyTorch path (opset 12).
    @require_torch
    @slow
    def __snake_case ( self : List[Any] ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
    # Export a freshly built tiny BERT saved to a temp dir, with a custom
    # fast tokenizer built from an in-memory vocab file.
    @require_torch
    @slow
    def __snake_case ( self : List[Any] ):
        from transformers import BertModel
        lowerCAmelCase__ = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
        with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
            vocab_file.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
            vocab_file.flush()
            lowerCAmelCase__ = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCAmelCase__ = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE_ ) ) )
            model.save_pretrained(SCREAMING_SNAKE_CASE_ )
            self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , SCREAMING_SNAKE_CASE_ )
    # Export via TF then quantize; the quantized file must not grow.
    @require_tf
    @slow
    def __snake_case ( self : str ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ = self._test_export(SCREAMING_SNAKE_CASE_ , '''tf''' , 12 , **SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = quantize(Path(SCREAMING_SNAKE_CASE_ ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
    # Export via PyTorch then quantize; the quantized file must not grow.
    @require_torch
    @slow
    def __snake_case ( self : Optional[Any] ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ = self._test_export(SCREAMING_SNAKE_CASE_ , '''pt''' , 12 , **SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = quantize(SCREAMING_SNAKE_CASE_ )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE_ ).stat().st_size:
                self.fail('''Quantized model is bigger than initial ONNX model''' )
    # Shared export helper: converts into a temp dir and returns the ONNX path,
    # failing the test on any exception.
    # NOTE(review): the signature repeats the parameter name
    # `SCREAMING_SNAKE_CASE_`, which is a SyntaxError in Python — the original
    # parameters (model, framework, opset, tokenizer) were lost.
    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : List[str] ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCAmelCase__ = Path(SCREAMING_SNAKE_CASE_ ).joinpath('''model.onnx''' )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
            return path
        except Exception as e:
            self.fail(SCREAMING_SNAKE_CASE_ )
    # Dynamic-axis inference on a tiny random PyTorch BERT.
    @require_torch
    @require_tokenizers
    @slow
    def __snake_case ( self : Union[str, Any] ):
        from transformers import BertModel
        lowerCAmelCase__ = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        lowerCAmelCase__ = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''pt''' )
    # Dynamic-axis inference on a tiny random TensorFlow BERT.
    @require_tf
    @require_tokenizers
    @slow
    def __snake_case ( self : Union[str, Any] ):
        from transformers import TFBertModel
        lowerCAmelCase__ = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) )
        lowerCAmelCase__ = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' )
        self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''tf''' )
    # Shared helper: checks infer_shapes output — all variables present,
    # inputs dynamic over {0: batch, 1: sequence}, outputs over batch
    # (and sequence for output_0).
    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ):
        lowerCAmelCase__ = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1''']
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = infer_shapes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Assert all variables are present
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
        self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
        self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
    # ensure_valid_input: contiguous args are all kept and reordered to the
    # forward signature; a gap (unprovided arg) truncates to input_ids only.
    def __snake_case ( self : Any ):
        lowerCAmelCase__ = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
        lowerCAmelCase__ = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
        lowerCAmelCase__ , lowerCAmelCase__ = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCAmelCase__ , lowerCAmelCase__ = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
        self.assertEqual(ordered_input_names[0] , '''input_ids''' )
    # generate_identified_filename appends the suffix before the extension.
    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' )
        self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
| 668 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module logger. Name restored to `logger` so the `logger.warning` call inside
# the feature extractor below resolves (the obfuscated throwaway name was
# never read).
logger = logging.get_logger(__name__)
class lowerCAmelCase_ ( snake_case__ ):
    # Audio feature extractor: converts raw mono waveforms into padded
    # log-mel spectrogram patches plus an attention mask.
    # NOTE(review): the base class name `snake_case__` is unbound here —
    # presumably the `SequenceFeatureExtractor` imported above; confirm.
    # NOTE(review): `__init__` repeats the parameter name
    # `SCREAMING_SNAKE_CASE_` (a SyntaxError), and its body reads names
    # (spectrogram_length, num_channels, patch_size, feature_size, n_fft,
    # hop_length_to_sampling_rate, sampling_rate, padding_value) that the
    # obfuscation disconnected from the parameters — the original argument
    # names were lost and cannot be recovered safely from this view.
    # Model-input names produced by __call__.
    UpperCamelCase_ :Union[str, Any] = ['audio_values', 'audio_mask']
    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_048 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Dict=[16, 16] , SCREAMING_SNAKE_CASE_ : Tuple=128 , SCREAMING_SNAKE_CASE_ : Optional[Any]=44_100 , SCREAMING_SNAKE_CASE_ : Optional[int]=86 , SCREAMING_SNAKE_CASE_ : Optional[int]=2_048 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : int , ):
        super().__init__(
            feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        lowerCAmelCase__ = spectrogram_length
        lowerCAmelCase__ = num_channels
        lowerCAmelCase__ = patch_size
        # Number of frequency patches per frame.
        lowerCAmelCase__ = feature_size // self.patch_size[1]
        lowerCAmelCase__ = n_fft
        lowerCAmelCase__ = sampling_rate // hop_length_to_sampling_rate
        lowerCAmelCase__ = sampling_rate
        lowerCAmelCase__ = padding_value
        # Slaney-style mel filter bank, transposed for use in _np_extract_fbank_features.
        lowerCAmelCase__ = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=SCREAMING_SNAKE_CASE_ , norm='''slaney''' , mel_scale='''slaney''' , ).T
    # Compute a dB-scaled log-mel spectrogram, clipped and rescaled to [-1, 1].
    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : np.array ):
        lowerCAmelCase__ = spectrogram(
            SCREAMING_SNAKE_CASE_ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        # Drop the last frame, shift by -20 dB, then squash into [-1, 1].
        lowerCAmelCase__ = log_spec[:, :-1]
        lowerCAmelCase__ = log_spec - 20.0
        lowerCAmelCase__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    # Main entry point: accepts a single waveform or a batch, validates the
    # sampling rate, extracts log-mel features, pads to the longest example
    # and optionally builds the patch-level attention mask.
    def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # Only mono audio is supported; a 2-D numpy input is a batch of mono clips.
        lowerCAmelCase__ = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        lowerCAmelCase__ = is_batched_numpy or (
            isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
            lowerCAmelCase__ = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa )
        elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase__ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase__ = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        lowerCAmelCase__ = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase__ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        lowerCAmelCase__ = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding patches.
            lowerCAmelCase__ = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            lowerCAmelCase__ = np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa )
        # convert into correct format for padding
        lowerCAmelCase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        lowerCAmelCase__ = np.ones([len(SCREAMING_SNAKE_CASE_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        lowerCAmelCase__ = padded_audio_features * self.padding_value
        for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
            lowerCAmelCase__ = audio_features[i]
            lowerCAmelCase__ = feature
        # return as BatchFeature
        if return_attention_mask:
            lowerCAmelCase__ = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            lowerCAmelCase__ = {'''audio_values''': padded_audio_features}
        lowerCAmelCase__ = BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
        return encoded_inputs
| 668 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
# Installed nltk version; checked below because tokenization behavior and the
# required nltk downloads differ across releases.
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize

# Constant names restored (_CITATION/_DESCRIPTION/_KWARGS_DESCRIPTION): the
# metric class below passes them to `add_start_docstrings` and `MetricInfo`,
# but the obfuscation had rebound them all to one throwaway name.
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    # METEOR metric wrapper around nltk's meteor_score.
    # NOTE(review): the three methods below all share the name `__snake_case`,
    # so only the last definition survives; the datasets.Metric framework
    # expects them to be `_info`, `_download_and_prepare` and `_compute` —
    # confirm against the original file.
    # Metric metadata: string predictions/references, with citation and links.
    def __snake_case ( self : List[Any] ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
                '''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
                '''https://en.wikipedia.org/wiki/METEOR''',
            ] , )
    # Download the nltk corpora METEOR needs; newer nltk versions require
    # extra resources (punkt tokenizer, omw-1.4 wordnet data).
    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Any ):
        import nltk
        nltk.download('''wordnet''' )
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            nltk.download('''punkt''' )
        if NLTK_VERSION >= version.Version('''3.6.6''' ):
            nltk.download('''omw-1.4''' )
    # Compute the mean METEOR score over (reference, prediction) pairs.
    # nltk >= 3.6.5 requires pre-tokenized input, hence the word_tokenize branch.
    # NOTE(review): `SCREAMING_SNAKE_CASE_` is one unbound name standing in for
    # several original variables (predictions, references, alpha, beta, gamma,
    # scores) — the exact bindings cannot be recovered from this view.
    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int]=0.9 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : int=0.5 ):
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            lowerCAmelCase__ = [
                meteor_score.single_meteor_score(
                    word_tokenize(SCREAMING_SNAKE_CASE_ ) , word_tokenize(SCREAMING_SNAKE_CASE_ ) , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , gamma=SCREAMING_SNAKE_CASE_ )
                for ref, pred in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            ]
        else:
            lowerCAmelCase__ = [
                meteor_score.single_meteor_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , alpha=SCREAMING_SNAKE_CASE_ , beta=SCREAMING_SNAKE_CASE_ , gamma=SCREAMING_SNAKE_CASE_ )
                for ref, pred in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            ]
        return {"meteor": np.mean(SCREAMING_SNAKE_CASE_ )}
| 668 |
from collections import namedtuple

# Each unit stores the factor converting the unit *to* cubic meters (`from_`)
# and the factor converting cubic meters *to* the unit (`to`).
from_to = namedtuple("from_to", "from_ to")

# Names restored: the conversion function below reads `from_to` and
# `METRIC_CONVERSION`, which the obfuscation had rebound to a throwaway name.
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def lowerCAmelCase_(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume ``value`` from ``from_type`` units to ``to_type`` units.

    Args:
        value: the quantity to convert.
        from_type / to_type: unit names; must be keys of METRIC_CONVERSION.

    Returns:
        The converted volume (value -> cubic meters -> target unit).

    Raises:
        ValueError: if either unit name is not supported.
    """
    # NOTE: the obfuscated signature repeated one parameter name (a
    # SyntaxError) and the error messages joined the wrong object; the
    # supported-unit list is now joined from METRIC_CONVERSION's keys.
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger. Name restored to `logger` so the `logger.error` call inside
# the tokenizer's save_vocabulary resolves (the obfuscated name was never read).
logger = logging.get_logger(__name__)
# Constant names restored: the tokenizer class below reads these three
# constants, but the obfuscation had rebound them all to one throwaway name.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class lowerCAmelCase_(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR: one token per character.

    NOTE(review): restored from the obfuscated original — the base class was
    the unbound name `snake_case__` (now the `PreTrainedTokenizer` imported
    above), and every method shared one name so only the last definition
    survived; method and attribute names are restored to the ones the
    tokenizer framework and this class's own code look up.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        # vocab maps characters to ids; decoder is the inverse mapping.
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # One token per character. The obfuscated version extended the list
        # with the *whole* input string on every iteration, duplicating the
        # text len(text) times; extending with the single character is the
        # intended behavior.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a character to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its character (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json under ``save_directory``; return the written path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 668 |
def lowerCAmelCase_(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    A binary search locates each element's insertion point in the sorted
    prefix, then the prefix tail is shifted one slot right to make room.

    Args:
        collection: a mutable list of mutually comparable items.

    Returns:
        The same list, sorted ascending.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search collection[:i] for val's insertion index `low`.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right and drop val into place. (The obfuscated
        # original assigned to throwaway locals instead of list slots, so it
        # never actually sorted anything, and its range() arguments were the
        # list itself.)
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    # The guard previously called the undefined name `binary_insertion_sort`.
    print(lowerCAmelCase_(unsorted))
| 668 | 1 |
import numpy as np
import datasets
# Constant names restored (_DESCRIPTION/_CITATION/_KWARGS_DESCRIPTION): the
# metric class below passes them to `add_start_docstrings` and `MetricInfo`,
# but the obfuscation had rebound them all to one throwaway name.
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    # Mahalanobis-distance metric: distance of each row of X from a reference
    # distribution.
    # NOTE(review): both methods share the name `__snake_case`, so only the
    # last survives; the datasets.Metric framework expects `_info` and
    # `_compute` — confirm against the original file.
    # Metric metadata: X is a sequence of float sequences.
    def __snake_case ( self : List[str] ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ),
                } ) , )
    # Compute the Mahalanobis distance of each row of X w.r.t. the reference
    # distribution (inverse covariance of the reference, pseudo-inverse when
    # the covariance matrix is singular).
    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        # convert to numpy arrays
        lowerCAmelCase__ = np.array(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = np.array(SCREAMING_SNAKE_CASE_ )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        # NOTE(review): the argument of np.mean here is the unbound obfuscated
        # name — presumably `reference_distribution` (center X on the
        # reference mean); confirm against the original.
        lowerCAmelCase__ = X - np.mean(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = np.cov(reference_distribution.T )
        try:
            lowerCAmelCase__ = np.linalg.inv(SCREAMING_SNAKE_CASE_ )
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the Moore-Penrose pseudo-inverse.
            lowerCAmelCase__ = np.linalg.pinv(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = np.dot(SCREAMING_SNAKE_CASE_ , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 668 |
def lowerCAmelCase_(input_string: str, pattern: str) -> bool:
    """Return True if ``input_string`` fully matches the regex-like ``pattern``.

    Supported metacharacters: ``.`` matches any single character; ``*``
    matches zero or more of the preceding element (classic full-match
    semantics, as in Leetcode's "Regular Expression Matching").

    Args:
        input_string: the plain text to test.
        pattern: the pattern, containing only literals, '.' and '*'.
    """
    # NOTE: the obfuscated signature repeated one parameter name (a
    # SyntaxError); the names the body already used are restored.
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' consumes zero occurrences of the preceding element.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' consumes one more occurrence of the preceding element.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    # (the guard previously called the undefined name `match_pattern`)
    if lowerCAmelCase_(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 668 | 1 |
def lowerCAmelCase_(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is True, else ``max_val``.

    Raises:
        AssertionError: if any argument has the wrong type.
        ValueError: if ``min_val`` is greater than ``max_val``.
    """
    # NOTE: the obfuscated signature reused one name for all three parameters
    # (a SyntaxError); the names are restored, and the isinstance checks are
    # presumed to be int/int/bool per the defaults — confirm against the
    # original file.
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def lowerCAmelCase_(number_a: int, number_b: int) -> int:
    """Return the midpoint of the two numbers, truncated toward zero."""
    # The obfuscated version collapsed both parameters to one name and summed
    # one argument with itself; the second operand is restored so the function
    # actually averages its two inputs.
    return int((number_a + number_b) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find ``to_guess`` inside (lower, higher) by repeated bisection,
    printing each midpoint tried and the final answer.

    Raises:
        AssertionError: if any argument is not an ``int``.
        ValueError: if ``lower > higher`` or ``to_guess`` lies outside the
            open interval (lower, higher).
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        # Compare a candidate against the target: "high", "low" or "same".
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            # Midpoint is below the target: raise the lower bound.
            last_lowest = number
        elif answer(number) == "high":
            # Midpoint is above the target: lower the upper bound.
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    """Read the bounds and the target from stdin and run the guessing game."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 668 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger, used by the tokenizer's save_vocabulary error path.
logger = logging.get_logger(__name__)
# File names, remote vocab locations, and max input sizes referenced by the
# tokenizer class below (it reads VOCAB_FILES_NAMES etc. as class attributes).
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class lowerCAmelCase_(PreTrainedTokenizer):
    """Character-level tokenizer backed by a JSON vocab file (MGP-STR style).

    Each character of the input text is one token. The base class was
    `snake_case__` (undefined); PreTrainedTokenizer is what the file imports.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        # vocab maps token -> id; decoder is the inverse mapping (id -> token).
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary (base vocab plus added tokens)."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split a string into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary JSON into `save_directory`; returns the path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 668 | 1 |
# Utilities for inspecting `tests/models/...` test modules: map test classes to
# their model classes and companion *ModelTester classes.
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file):
    """Turn a test file path (``tests/models/.../test_modeling_*.py``) into a
    dotted module path usable with ``importlib.import_module``."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    # Drop the .py suffix and join the path components with dots.
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in the test module,
    sorted by class name."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return the test classes of a model test module, sorted by name.

    A test class is recognized by having a non-empty `all_model_classes`
    attribute (which excludes the bare (TF/Flax)ModelTesterMixin and other
    special classes).
    """
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Return the union of all model classes exercised by the test module,
    sorted by class name."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class`, run its setUp, and return the class of its
    `model_tester` attribute (or None if unset)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes that exercise `model_class`, sorted by name."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the ModelTester classes backing the test classes that exercise
    `model_class`, sorted by name."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Map each test class in the module to its ModelTester class (or None)."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Map each model class to the list of test classes that exercise it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Map each model class to the list of ModelTester classes backing it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert classes in `o` to their names so the structure is
    JSON-serializable; strings and other scalars pass through unchanged."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 668 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: each key names a submodule, each value lists the
# public names it provides. Backend-specific entries are only registered when
# their dependency (tokenizers/torch/tf/flax) is importable.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

# Static type checkers see real imports; at runtime the module is replaced by
# a _LazyModule proxy so heavy backends only load on first attribute access.
if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 1 |
import random
def lowerCAmelCase_ (lowercase__ : list , lowercase__ : int ) -> tuple:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = [], [], []
for element in data:
if element < pivot:
less.append(lowercase__ )
elif element > pivot:
greater.append(lowercase__ )
else:
equal.append(lowercase__ )
return less, equal, greater
def quick_select(items: list, index: int):
    """Return the element that would sit at position `index` in sorted order
    (0-based), using a randomized quickselect; None if `index` is out of range.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 668 |
from collections import deque
class Process:
    """A schedulable process with its bookkeeping fields for MLFQ."""

    def __init__(self, process_name: str, arrival_time: int, burst_time: int):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue scheduler.

    The first `number_of_queues - 1` levels run round-robin with the given
    time slices; the last level runs first-come-first-served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: "list[int]",
        queue: "deque[Process]",
        current_time: int,
    ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> "list[str]":
        """Names of finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: "list[Process]") -> "list[int]":
        """Total ready-queue waiting time of each process in `queue`."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: "list[Process]") -> "list[int]":
        """Arrival-to-completion time of each process in `queue`."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: "list[Process]") -> "list[int]":
        """Completion (stop) time of each process in `queue`."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: "deque[Process]") -> "list[int]":
        """Remaining burst time of each process in `queue`."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: "Process") -> int:
        """Accumulate the time `process` spent waiting since it last ran."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: "deque[Process]") -> "deque[Process]":
        """Run every remaining process to completion, in queue order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: "deque[Process]", time_slice: int):
        """Give each queued process one `time_slice`; unfinished ones go back
        to the queue. Returns (finished, ready_queue)."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> "deque[Process]":
        """Run the full MLFQ schedule and return the finish queue."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    # processes (name, arrival time, burst time) used as doctest globals
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    # there must be exactly one time slice per round-robin level
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # fresh processes for the actual demo run
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
    \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
    \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
    \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
    {mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 668 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original LUKE checkpoint into the Transformers format.

    Loads the config from `metadata_path`, remaps the weights, sanity-checks
    model outputs on a fixed sentence, and saves the model + tokenizer to
    `pytorch_dump_folder_path`. `model_size` is "base" or "large".
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens: reuse the "@" and "#"
    # word embeddings for <ent> and <ent2>.
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 10_24))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 7_68))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 10_24))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 7_68))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # BUG FIX: the original used `!=` here, which raised exactly when the
    # shape was correct. Mirror the word-hidden-state check above.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    """Read a tab-separated entity vocab file and map each entity name (first
    column) to its 0-based line index."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            entity_name, _ = line.rstrip().split("\t")
            entity_vocab[entity_name] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 668 |
# Accelerate end-to-end test script: checks `gather_for_metrics` and metric
# consistency between a single-process baseline and a distributed run.
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): this "true" flag is bound to a throwaway name; presumably it
# was meant to configure something (e.g. an environment variable) — confirm
# against the upstream script.
_UpperCAmelCase : Tuple = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a seeded RegressionModel, a DDP copy prepared by `accelerator`,
    and a DataLoader over `num_samples` regression samples."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Tokenize the GLUE/MRPC validation split and return a DataLoader.

    `use_longest` selects dynamic ("longest") padding instead of fixed
    max-length padding in the collate function.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=1_28, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Build the MRPC model/dataloader pair twice: once plain ("no") and once
    prepared through the Accelerator ("ddp"). Returns (setup_dict, accelerator).
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader` without grad, gathering (logits, targets)
    across processes, and return them concatenated into two tensors."""
    logits_and_targets = []
    for batch in dataloader:
        inp, target = batch.values()
        with torch.no_grad():
            logit = model(inp)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check `gather_for_metrics` returns exactly `num_samples` predictions
    (i.e. duplicates from distributed padding are dropped)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare MRPC metrics between a plain single-process evaluation and a
    distributed evaluation using `gather_for_metrics`; they must match."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def lowerCAmelCase_ () -> None:
    """Entry point: exercise `gather_for_metrics` across batching configurations.

    The obfuscated dump referenced an undefined `lowercase__` in several
    places; those references are restored to the in-scope values
    (`False`, the loop variables, and the local `accelerator`).
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_12)
    accelerator.state._reset_state()
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> List[str]:
    '''XLA/TPU multiprocessing entry point.

    `xmp.spawn` passes a process index as the single positional argument,
    which is intentionally ignored; the function simply delegates to the
    shared test driver.

    NOTE(review): `main` is expected to be the driver defined earlier in this
    module; under this dump's obfuscated naming the reference is unresolved.
    '''
    main()
# Standard script entry point: run the full test suite when executed directly.
if __name__ == "__main__":
    main()
| 668 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """Tokenization tests for LED, run against both slow and fast tokenizers.

    Restored from the obfuscated dump: the `TokenizerTesterMixin` contract
    requires the `tokenizer_class`/`rust_tokenizer_class` attributes and
    distinct helper/`test_*` method names (all methods had collapsed onto one
    name and silently overrode each other).
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Tiny BPE vocab/merges written to a temp dir so tokenizers can be
        # instantiated without network access.
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer built from the temp-dir fixture files."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer built from the temp-dir fixture files."""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained('''allenai/led-base-16384''')

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''')

    @require_torch
    def test_prepare_batch(self):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors='''pt''')
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors='''pt''')
            # Without `text_target`, no decoder-side keys should be produced.
            self.assertIn('''input_ids''', batch)
            self.assertIn('''attention_mask''', batch)
            self.assertNotIn('''labels''', batch)
            self.assertNotIn('''decoder_attention_mask''', batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            '''Summary of the text.''',
            '''Another summary.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding='''max_length''', return_tensors='''pt''')
            self.assertEqual(32, targets['''input_ids'''].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ['''I am a small frog''' * 1_024, '''I am a small frog'''], padding=True, truncation=True, return_tensors='''pt''')
            self.assertIsInstance(batch, BatchEncoding)
            # Truncated to the model's 16384-token window? No: to padded length 5122.
            self.assertEqual(batch.input_ids.shape, (2, 5_122))

    @require_torch
    def test_special_tokens(self):
        src_text = ['''A long paragraph for summarization.''']
        tgt_text = [
            '''Summary of the text.''',
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors='''pt''')
            targets = tokenizer(text_target=tgt_text, return_tensors='''pt''')
            input_ids = inputs['''input_ids''']
            labels = targets['''input_ids''']
            # Both encoder inputs and labels are wrapped in <s> ... </s>.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ['''Summary of the text.''', '''Another summary.''']
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output['''global_attention_mask'''] = [[0] * len(x) for x in encoded_output['''input_ids''']]
            outputs = tokenizer.pad(encoded_output)
            # `pad` must pad the custom `global_attention_mask` alongside the inputs.
            self.assertSequenceEqual(outputs['''global_attention_mask'''], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Not applicable to LED; intentionally skipped.
        pass

    def test_embeded_special_tokens(self):
        # Slow and fast tokenizers must agree on special-token handling.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r['''token_type_ids''']), sum(tokens_p['''token_type_ids''']))
                self.assertEqual(
                    sum(tokens_r['''attention_mask''']) / len(tokens_r['''attention_mask''']),
                    sum(tokens_p['''attention_mask''']) / len(tokens_p['''attention_mask''']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''])
                self.assertSequenceEqual(tokens_p['''input_ids'''], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['''input_ids'''], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
                self.assertSequenceEqual(
                    tokens_r_str, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''])
| 668 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase : str = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_UpperCAmelCase : List[str] = {
"ctrl": 256,
}
_UpperCAmelCase : int = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = set()
lowerCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ = char
lowerCAmelCase__ = set(lowercase__ )
return pairs
class lowerCAmelCase_ ( snake_case__ ):
    """CTRL BPE tokenizer (byte-pair encoding applied per whitespace-split word).

    Restored from the obfuscated dump: the class attributes and method names
    must match the `PreTrainedTokenizer` contract (`vocab_size`, `_tokenize`,
    `save_vocabulary`, ...) or the base class cannot dispatch to them, and the
    sort key in `save_vocabulary` referenced an undefined `kv`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """Load the JSON vocabulary and the ranked BPE merges from disk."""
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            # First line is the "#version" header; the file ends with a blank line.
            merges = merges_handle.read().split('''\n''')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Earlier merges have lower rank (= higher priority).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full token -> id mapping, including tokens added after init."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single word; results are memoized."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so merges can distinguish word-final symbols.
        word = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the highest-priority (lowest-rank) pair present in the word.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        # Join sub-word units with the continuation marker, then strip the
        # trailing "</w>" (4 chars) from the final unit.
        word = '''@@ '''.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` on whitespace, then BPE-encode each word."""
        split_tokens = []
        words = re.findall(R'''\S+\n?''', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(''' ''')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocabulary id (falling back to the unk id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token string (falling back to the unk token)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Undo BPE: drop the "@@ " continuation markers and re-join."""
        out_string = ''' '''.join(tokens).replace('''@@ ''', '''''').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write `vocab.json` and `merges.txt` into `save_directory`.

        Returns the `(vocab_file, merge_file)` paths, or None (after logging
        an error) when `save_directory` is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            # Emit merges in rank order; warn if the ranks are not consecutive.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file
| 668 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
    """Fast (CPU-friendly) pipeline test for ScoreSdeVePipeline.

    Restored from the obfuscated dump: locals were assigned under one name
    and read under another (`model`, `sde_ve`, `generator`, ...).
    """

    @property
    def dummy_uncond_unet(self):
        """Small deterministic UNet used as the pipeline backbone."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    def test_inference(self):
        """Pipeline output matches a pinned slice, with and without `return_dict`."""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator).images
        # Re-seed so the tuple-return path sees the identical noise sequence.
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test: full ScoreSdeVe pipeline from pretrained weights."""

    def test_inference(self):
        model_id = '''google/ncsnpp-church-256'''
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='''numpy''', generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 668 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_ :
    """Abstract base class for generation streamers.

    Restored from the obfuscated dump: both abstract methods shared the
    name `__snake_case`, so the second definition silently overrode the
    first and (being name-mangled) neither could be overridden cleanly.
    """

    def put(self, value):
        """Receive a new batch of token ids pushed by the generator."""
        raise NotImplementedError()

    def end(self):
        """Signal that generation has finished."""
        raise NotImplementedError()
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer that prints decoded tokens to stdout as they are generated.

    Restored from the obfuscated dump: the constructor repeated one
    parameter name (a SyntaxError) and locals were assigned under one
    name and read under another.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Decode the incoming token ids and print any newly complete text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''')
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            # The first `put` carries the prompt; swallow it when requested.
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n'''):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''') + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        """Flush whatever is still cached and mark the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print finalized text; emit a trailing newline only at stream end."""
        print(text, flush=True, end='''''' if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if the codepoint lies in a CJK Unicode block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4e00 and cp <= 0x9fff)
            or (cp >= 0x3400 and cp <= 0x4dbf)  #
            or (cp >= 0x2_0000 and cp <= 0x2_a6df)  #
            or (cp >= 0x2_a700 and cp <= 0x2_b73f)  #
            or (cp >= 0x2_b740 and cp <= 0x2_b81f)  #
            or (cp >= 0x2_b820 and cp <= 0x2_ceaf)  #
            or (cp >= 0xf900 and cp <= 0xfaff)
            or (cp >= 0x2_f800 and cp <= 0x2_fa1f)  #
        ):  #
            return True
        return False
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer exposing generated text as a (blocking) iterator via a Queue.

    Restored from the obfuscated dump: the constructor repeated one
    parameter name (a SyntaxError) and `__next__` had been renamed away,
    which broke the iterator protocol.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        # Sentinel pushed on `stream_end`; `None` is never a decoded string.
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Enqueue finalized text instead of printing; push sentinel at end."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        # Blocks until the generator thread produces more text (or times out).
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 668 | 1 |
_UpperCAmelCase : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_UpperCAmelCase : Dict = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_UpperCAmelCase : Dict = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_UpperCAmelCase : List[Any] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_UpperCAmelCase : Any = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_UpperCAmelCase : int = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_UpperCAmelCase : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_UpperCAmelCase : Any = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 668 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Maps submodule name -> exported names; consumed by `_LazyModule` below.
# Restored from the obfuscated dump, where `_import_structure` was referenced
# at the bottom but never defined and the modeling list was never attached.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 668 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
UpperCamelCase_ :str = XGLMTokenizer
UpperCamelCase_ :Union[str, Any] = XGLMTokenizerFast
UpperCamelCase_ :str = True
UpperCamelCase_ :Dict = True
def __snake_case ( self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XGLMTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : Any ):
lowerCAmelCase__ = '''<pad>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_008 )
def __snake_case ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def __snake_case ( self : Any ):
lowerCAmelCase__ = XGLMTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __snake_case ( self : Optional[int] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __snake_case ( self : int ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE_ , f.name )
lowerCAmelCase__ = XGLMTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = pickle.dumps(SCREAMING_SNAKE_CASE_ )
pickle.loads(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : List[str] ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = '''I was born in 92000, and this is falsé.'''
lowerCAmelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
    @slow
    def __snake_case ( self : Optional[Any] ):
        """'Hello World!' must encode to the known reference ids."""
        # NOTE(review): text and expected ids are both bound to `lowerCAmelCase__`
        # while the assertion reads `SCREAMING_SNAKE_CASE_` -- mangled data flow.
        lowerCAmelCase__ = '''Hello World!'''
        lowerCAmelCase__ = [2, 31_227, 4_447, 35]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
    @slow
    def __snake_case ( self : Union[str, Any] ):
        """A long sentence with odd punctuation and OOV words must encode to the reference ids."""
        # NOTE(review): same mangled flow as the easy-symbols test above -- the
        # assertion reads `SCREAMING_SNAKE_CASE_`, never bound here.
        lowerCAmelCase__ = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
        )
        # fmt: off
        lowerCAmelCase__ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
    @slow
    def __snake_case ( self : Optional[Any] ):
        """Full integration check of input_ids/attention_mask against the hub checkpoint."""
        # fmt: off
        lowerCAmelCase__ = {
            '''input_ids''': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
            '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        } # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''facebook/xglm-564M''' , padding=SCREAMING_SNAKE_CASE_ , )
| 668 |
from __future__ import annotations
def lowerCAmelCase_ (value: list[int] , weight: list[int] , capacity: int ) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are considered in decreasing value/weight ratio; the first item that
    no longer fits is taken fractionally.

    Args:
        value: profit of each item.
        weight: weight of each item (same length as ``value``).
        capacity: total carrying capacity.

    Returns:
        ``(max_value, fractions)`` where ``fractions[i]`` is the fraction of
        item ``i`` taken (0, 1, or in between for the single split item).
    """
    # BUGFIX: the original declared three parameters with the same name (a
    # SyntaxError) and sorted with `key=lambda lowercase__: ratio[i]`, which
    # neither used the lambda argument nor a valid `reverse` flag.
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    # Greedy order: best value-per-weight first.
    index.sort(key=lambda i: ratio[i] , reverse=True )

    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the part of this item that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 668 | 1 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCAmelCase_ ( snake_case__ ):
    """Trivial map-style dataset: item ``i`` is simply the integer ``i``.

    BUGFIX(review): the corrupted original named the constructor parameter
    ``SCREAMING_SNAKE_CASE_`` yet read ``length`` and assigned it to a throwaway
    local, so ``__len__`` crashed; parameter and attribute are restored below.
    """

    def __init__( self , length : int = 101 ):
        # Number of items served; defaults match the three loop sizes used below.
        self.length = length

    def __len__( self ):
        return self.length

    def __getitem__( self , i ):
        # The "sample" is just its own index, which lets the driver verify order.
        return i
class lowerCAmelCase_ :
    """Collates a batch of integer indices into ``input_ids``/``labels`` tensors."""

    def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        # Both fields carry the raw indices; each key gets its own tensor.
        input_ids = torch.tensor(SCREAMING_SNAKE_CASE_ )
        labels = torch.tensor(SCREAMING_SNAKE_CASE_ )
        return {"input_ids": input_ids, "labels": labels}
class lowerCAmelCase_ ( nn.Module ):
    """Minimal model: echoes its input ids; returns a zero loss when labels are given.

    BUGFIX(review): the corrupted original declared the forward-style method with
    two parameters of the same name (a SyntaxError) and bound the linear layer to
    a local instead of ``self``, defeating the DDP-parameter workaround.
    """

    def __init__( self ):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120 , 80 )

    def __snake_case ( self , input_ids , labels=None ):
        """Return (zero-loss, input_ids) when labels are provided, else input_ids."""
        if labels is not None:
            return torch.tensor(0.0 , device=input_ids.device ), input_ids
        else:
            return input_ids
class lowerCAmelCase_ ( snake_case__ ):
    """Launches this file under torchrun on 2 Neuron cores and asserts a clean exit."""

    @require_torch_neuroncore
    def __snake_case ( self : int ):
        """Spawn the distributed run; any failure in the child raises here."""
        # BUGFIX(review): the original bound every local to `lowerCAmelCase__` and
        # then read `distributed_args`/`args` unbound; bindings restored below.
        distributed_args = f'--nproc_per_node=2\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
class lowerCAmelCase_ ( snake_case__ ):
    """Launches this file under torchrun across all visible GPUs and asserts a clean exit."""

    @require_torch_multi_gpu
    def __snake_case ( self : Tuple ):
        """Spawn one worker per GPU; any failure in the children raises here."""
        # BUGFIX(review): local bindings restored (the original assigned every
        # value to `lowerCAmelCase__` and then read `distributed_args`/`args`).
        distributed_args = f'--nproc_per_node={torch.cuda.device_count()}\n            --master_port={get_torch_dist_unique_port()}\n            {self.test_file_dir}/test_trainer_distributed.py\n        '.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'--output_dir {output_dir}'.split()
        cmd = ['''torchrun'''] + distributed_args + args
        execute_subprocess_async(cmd , env=self.get_env() )
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    #
    # BUGFIX(review): the corrupted original bound every value to `_UpperCAmelCase`
    # and then read `parser`/`training_args`/`dataset`/`trainer`/`metrics`/`p`
    # unbound; those bindings are restored below. The DummyDataset/DummyModel/
    # DummyDataCollator references are kept as-is (the classes above lost their
    # names to the same corruption -- confirm their intended names separately).
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            """Succeed iff predictions and labels come back as 0..len(dataset)-1 in order."""
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    '''Predictions and/or labels do not match expected results:\n  - predictions: '''
                    f'{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}' )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        # Re-run evaluation/prediction with eval accumulation enabled.
        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 668 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ (dataset , expected_features ):
    """Shared checks: *dataset* must be a 4x3 Dataset with the expected column dtypes.

    BUGFIX(review): the original declared both parameters as ``lowercase__``
    (a SyntaxError) while the body read ``dataset``/``expected_features``.
    """
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ (keep_in_memory , parquet_path , tmp_path ):
    """Reading one parquet file yields the reference dataset; Arrow memory only grows when kept in memory."""
    # BUGFIX(review): parameter names restored to the pytest fixture names the
    # body reads (the original reused ``lowercase__`` for all three -- SyntaxError).
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def lowerCAmelCase_ (features , parquet_path , tmp_path ):
    """An explicit Features schema (or None for the defaults) is honoured when reading parquet."""
    # BUGFIX(review): fixture/parameter names restored from the body's own reads.
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path , features=features , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ (split , parquet_path , tmp_path ):
    """The requested split name is propagated onto the loaded dataset (default: train)."""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(parquet_path , cache_dir=cache_dir , split=split ).read()
    _check_parquet_dataset(dataset , expected_features )
    # NOTE(review): when ``split`` is falsy this expression degenerates to the
    # truthy string "train" (always passes) -- same quirk exists upstream.
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase_ (path_type , parquet_path , tmp_path ):
    """Both a single path and a list of paths are accepted by the reader."""
    # BUGFIX(review): parameter/local names restored (the original's duplicated
    # ``lowercase__`` parameters are a SyntaxError).
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def lowerCAmelCase_ (dataset_dict , expected_features , splits=("train",) ):
    """Shared checks for DatasetDict loads: each split is a 4x3 dataset with expected dtypes."""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ (keep_in_memory , parquet_path , tmp_path ):
    """Reading {"train": path} yields a DatasetDict; memory behaviour matches keep_in_memory."""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {'''train''': parquet_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def lowerCAmelCase_ (features , parquet_path , tmp_path ):
    """An explicit Features schema (or None) is honoured when reading a DatasetDict."""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = ParquetDatasetReader({'''train''': parquet_path} , features=features , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ (split , parquet_path , tmp_path ):
    """Split handling for DatasetDict reads: a named split, or a default train/test pair."""
    if split:
        path = {split: parquet_path}
    else:
        split = '''train'''
        path = {'''train''': parquet_path, '''test''': parquet_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ (dataset , tmp_path ):
    """Writing a dataset to parquet produces a non-empty file whose table round-trips exactly."""
    writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / '''foo.parquet''' )
    output_table = pf.read()
    assert dataset.data.table == output_table
def lowerCAmelCase_ (shared_datadir , tmp_path ):
    """Image features survive a parquet write/read round-trip, both eager and streaming."""
    image_path = str(shared_datadir / '''test_image_rgb.jpg''' )
    data = {'''image''': [image_path]}
    features = Features({'''image''': Image()} )
    dataset = Dataset.from_dict(data , features=features )
    writer = ParquetDatasetWriter(dataset , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
    assert dataset.features == reloaded_dataset.features

    # NOTE(review): streaming flag restored to True per the upstream test -- confirm.
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=True ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    '''feature, expected''' , [
        (Features({'''foo''': Value('''int32''' )} ), None),
        (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def lowerCAmelCase_ (feature , expected ):
    """get_writer_batch_size picks a smaller row-group size for image/audio features."""
    # BUGFIX(review): parameters renamed to match the parametrize ids (the
    # original reused ``lowercase__`` for both -- SyntaxError).
    assert get_writer_batch_size(feature ) == expected
| 668 | 1 |
def lowerCAmelCase_ (set_a , set_b , alternative_union: bool = False ):
    """Return the Jaccard similarity |A∩B| / |A∪B| of two collections.

    Supports ``set`` inputs and ordered ``list``/``tuple`` inputs (order-aware
    union in the latter case). With ``alternative_union`` the denominator is
    ``len(A) + len(B)`` instead of the true union size.

    Returns None for unsupported input types.

    BUGFIX(review): the original declared all three parameters with one name
    (a SyntaxError), read unbound names in the body, and carried an
    unreachable duplicate return before ``return None``.
    """
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union

    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            # Order-preserving union: A followed by elements of B not in A.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )

    return None
if __name__ == "__main__":
    # Demo inputs: two overlapping string sets.
    _UpperCAmelCase : Optional[Any] = {"a", "b", "c", "d", "e"}
    _UpperCAmelCase : Union[str, Any] = {"c", "d", "e", "f", "h", "i"}
    # NOTE(review): `jaccard_similarity`, `set_a` and `set_b` are not defined
    # under these names in this file (both assignments above target
    # `_UpperCAmelCase`) -- this demo cannot run as written; names were mangled.
    print(jaccard_similarity(set_a, set_b))
| 668 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants.
# NOTE(review): every constant below is bound to the same name `_UpperCAmelCase`,
# so only the last assignment survives; the class body reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which
# are never defined here -- the original names appear to have been mangled.
_UpperCAmelCase : Dict = logging.get_logger(__name__)
# Expected filename of the serialized SentencePiece model.
_UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
# Hub download location of the pretrained vocab file.
_UpperCAmelCase : List[Any] = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
# Maximum model input size per checkpoint.
_UpperCAmelCase : Union[str, Any] = {
    "camembert-base": 512,
}
# SentencePiece's word-boundary marker character.
_UpperCAmelCase : Dict = "▁"
class lowerCAmelCase_ ( snake_case__ ):
    """CamemBERT tokenizer: SentencePiece BPE with fairseq-compatible special-token ids.

    BUGFIX(review): restored from a corrupted original in which parameter lists
    reused a single name (SyntaxErrors, including a duplicated keyword argument)
    and every assignment targeted one local, so ``self.sp_model`` etc. were
    never set. Restored names follow the reads in the method bodies and the
    standard slow-tokenizer API -- confirm against the upstream implementation.
    """

    # Standard slow-tokenizer class attributes read by the base-class machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self ,
        vocab_file ,
        bos_token="<s>" ,
        eos_token="</s>" ,
        sep_token="</s>" ,
        cls_token="<s>" ,
        unk_token="<unk>" ,
        pad_token="<pad>" ,
        mask_token="<mask>" ,
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] ,
        sp_model_kwargs: Optional[Dict[str, Any]] = None ,
        **kwargs ,
    ):
        """Load the SentencePiece model and build the fairseq id-offset tables."""
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Add <s> ... </s> (and </s></s> pair separator) around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """Return a 1/0 mask marking special tokens in the built sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """CamemBERT uses a single all-zero token-type-id segment, like RoBERTa."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size( self ):
        """SentencePiece size plus the extra fairseq special tokens."""
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )

    def get_vocab( self ):
        """Return token -> id for the whole vocabulary, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        """Map a token to its id, routing fairseq specials and unk through the offset tables."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string( self , tokens ):
        """Join sub-tokens back into text, decoding special tokens verbatim."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it and reload on setstate.
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Copy (or re-serialize) the SentencePiece model into *save_directory*."""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 668 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[Any] = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_UpperCAmelCase : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
    """Arguments defining the dataset and preprocessing for TAPEX table-fact classification.

    BUGFIX(review): the corrupted original bound every field to the same name
    ``UpperCamelCase_`` (so only the last survived) and named the post-init hook
    ``__snake_case`` (never invoked). Field names were restored from their help
    metadata; defaults obfuscated as ``snake_case__`` restored to False/None.
    """

    dataset_name: Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
    max_seq_length: int = field(
        default=1024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
    train_file: Optional[str] = field(
        default=None , metadata={'help': 'A csv or a json file containing the training data.'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'A csv or a json file containing the validation data.'} )
    test_file: Optional[str] = field(default=None , metadata={'help': 'A csv or a json file containing the test data.'} )

    def __post_init__( self ):
        """Validate that either a dataset name or train/validation files were supplied."""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
        else:
            train_extension = self.train_file.split('''.''' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('''.''' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
    """Arguments for selecting the pretrained model, config and tokenizer.

    BUGFIX(review): field names restored from their help metadata (the corrupted
    original bound every field to ``UpperCamelCase_``); obfuscated defaults
    restored to the conventional None/True/False values -- confirm upstream.
    """

    model_name_or_path: str = field(
        default=None , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
def lowerCAmelCase_ () -> Union[str, Any]:
    """Entry point: fine-tune a BART/TAPEX model for TabFact table-fact verification.

    Parses model/data/training arguments, loads the dataset (hub name or local
    csv/json files), tokenizes each statement together with its table, then
    runs training / evaluation / prediction with the HF ``Trainer``.

    Fixes over the previous revision: all locals had been collapsed to a
    single name and read back through the undefined name ``lowercase__``
    (immediate ``NameError``); the inner functions declared parameters their
    bodies never used; ``training_args.fpaa`` / ``np.floataa`` are restored to
    ``fp16`` / ``float32``.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('''.''')[-1]
                test_extension = data_args.test_file.split('''.''')[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['''test'''] = data_args.test_file
            else:
                raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''')

        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}')

        if data_args.train_file.endswith('''.csv'''):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('''csv''', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('''json''', data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets['''train'''].features['''label'''].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('''.ckpt''' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    # (restored from the original run_tabfact script: the mapping belongs on the model config)
    model.config.label2id = {'''Refused''': 0, '''Entailed''': 1}
    model.config.id2label = {0: '''Refused''', 1: '''Entailed'''}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # '#'-delimited rows, newline-delimited lines; first row is the header.
            _table_content = [_table_row.split('''#''') for _table_row in _table_text.strip('''\n''').split('''\n''')]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        statement = examples['''statement''']
        table = list(map(_convert_table_text_to_pandas, examples['''table_text''']))
        result = tokenizer(table, statement, padding=padding, max_length=max_seq_length, truncation=True)
        result['''label'''] = examples['''label''']
        return result

    with training_args.main_process_first(desc='''dataset map pre-processing'''):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc='''Running tokenizer on dataset''',
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''')
        train_dataset = raw_datasets['''train''']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''')
        eval_dataset = raw_datasets['''validation''']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('''--do_predict requires a test dataset''')
        predict_dataset = raw_datasets['''test''']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        # Pad to a multiple of 8 so Tensor Cores can be used.
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['''train_samples'''] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''', metrics)
        trainer.save_metrics('''train''', metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['''eval_samples'''] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('''eval''', metrics)
        trainer.save_metrics('''eval''', metrics)

    if training_args.do_predict:
        logger.info('''*** Predict ***''')
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('''label''')
        predictions = trainer.predict(predict_dataset, metric_key_prefix='''predict''').predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, '''predict_results_tabfact.txt''')
        if trainer.is_world_process_zero():
            with open(output_predict_file, '''w''') as writer:
                logger.info('''***** Predict Results *****''')
                writer.write('''index\tprediction\n''')
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f'{index}\t{item}\n')

    kwargs = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Dict:
    """Multiprocess entry point (e.g. TPU spawning): delegates to the main routine.

    NOTE(review): this definition re-uses the name ``lowerCAmelCase_`` and so
    shadows the training entry point defined above, and ``main`` is not
    defined anywhere in this module, so both calls below raise ``NameError``.
    Restore distinct names (originally ``_mp_fn`` / ``main``) to fix.
    """
    main()


if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this module -- see the note above.
    main()
| 668 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_ :
    """Base interface for token streamers consumed by ``generate()``.

    Fix over the previous revision: both abstract methods were named
    ``__snake_case``, so the second ``def`` silently replaced the first in the
    class namespace; the canonical ``put`` / ``end`` names are restored.
    """

    def put(self, value):
        """Receive a new batch of token ids; concrete streamers must override."""
        raise NotImplementedError()

    def end(self):
        """Signal that generation has finished; concrete streamers must override."""
        raise NotImplementedError()
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer that decodes generated tokens incrementally and prints complete
    words to stdout as soon as they are formed.

    Fixes over the previous revision: ``__init__`` declared three parameters
    with the same name (a ``SyntaxError``), every method was named
    ``__snake_case`` (later defs clobbered earlier ones), and the bodies read
    names the signatures no longer declared.  Parameter, local, and method
    names are restored to the ones the bodies actually use.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []  # token ids not yet flushed as text
        self.print_len = 0  # number of characters already emitted
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Decode the incoming token ids and emit any newly completed text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''')
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n'''):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''') + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining cached text and mark the stream as finished."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print the finalized text; only end the line when the stream ends."""
        print(text, flush=True, end='''''' if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if ``cp`` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4e00 and cp <= 0x9fff)
            or (cp >= 0x3400 and cp <= 0x4dbf)
            or (cp >= 0x2_0000 and cp <= 0x2_a6df)
            or (cp >= 0x2_a700 and cp <= 0x2_b73f)
            or (cp >= 0x2_b740 and cp <= 0x2_b81f)
            or (cp >= 0x2_b820 and cp <= 0x2_ceaf)
            or (cp >= 0xf900 and cp <= 0xfaff)
            or (cp >= 0x2_f800 and cp <= 0x2_fa1f)
        ):
            return True
        return False
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer that stores decoded text in a queue so that another thread can
    iterate over the generated text (e.g. for interactive demos).

    Fixes over the previous revision: ``__init__`` declared duplicate
    parameter names (a ``SyntaxError``) and the ``on_finalized_text`` /
    ``__next__`` methods were both named ``__snake_case``; the canonical names
    used by the bodies are restored.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None  # sentinel placed in the queue at stream end
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue; append the stop signal at stream end."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 668 |
def lowerCAmelCase_ (number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of ``number``.

    If ``digit_amount`` is positive, the fractional part is rounded to that
    many decimal places; otherwise it is returned unrounded.

    Fix over the previous revision: both parameters were named
    ``lowercase__`` (a ``SyntaxError``) while the body read ``number`` and
    ``digit_amount``; the signature now declares the names the body uses.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo: print the isolated decimal part for a range of inputs.
    # Fix over the previous revision: the calls targeted `decimal_isolate`,
    # which is not defined in this module -- the function above is named
    # `lowerCAmelCase_`.
    for value, digits in [
        (1.53, 0),
        (35.345, 1),
        (35.345, 2),
        (35.345, 3),
        (-14.789, 3),
        (0, 2),
        (-14.123, 1),
        (-14.123, 2),
        (-14.123, 3),
    ]:
        print(lowerCAmelCase_(value, digits))
| 668 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    """Holds the mean and standard deviation of an image embedder and applies
    them to scale (normalize) / unscale (denormalize) embeddings.

    Fixes over the previous revision: ``__init__`` discarded the two
    ``nn.Parameter`` tensors into throwaway locals although the other methods
    read ``self.mean`` / ``self.std``, and all three methods shared the name
    ``__snake_case`` so only the last survived; attribute and method names
    are restored to the ones the bodies actually use.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        """Move/cast both parameters; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize embeddings: (embeds - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert :meth:`scale`: embeds * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 668 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase_ :
    """Test helper: builds a tiny FunnelConfig plus random inputs and runs
    shape checks for every TF Funnel head (base model, pretraining, MLM,
    sequence/token classification, multiple choice, question answering).

    NOTE(review): every method except ``__init__`` is named ``__snake_case``
    (obfuscated) -- a later ``def`` with the same name replaces the earlier
    one in the class namespace, so only the last definition survives.  Many
    signatures also declare duplicate ``SCREAMING_SNAKE_CASE_`` parameters
    (a ``SyntaxError``), and the bodies read names the signatures no longer
    declare.  The original method/parameter names must be restored before
    this class can run.
    """

    def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=99 , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2] , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : int=37 , SCREAMING_SNAKE_CASE_ : str="gelu_new" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=False , ):
        # Record the test harness and all hyper-parameters of the tiny model.
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = seq_length
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_input_mask
        lowerCAmelCase__ = use_token_type_ids
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = block_sizes
        lowerCAmelCase__ = num_decoder_layers
        lowerCAmelCase__ = d_model
        lowerCAmelCase__ = n_head
        lowerCAmelCase__ = d_head
        lowerCAmelCase__ = d_inner
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = hidden_dropout
        lowerCAmelCase__ = attention_dropout
        lowerCAmelCase__ = activation_dropout
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = type_vocab_size
        lowerCAmelCase__ = 2
        lowerCAmelCase__ = num_labels
        lowerCAmelCase__ = num_choices
        lowerCAmelCase__ = scope
        lowerCAmelCase__ = initializer_std
        # Used in the tests to check the size of the first attention layer
        lowerCAmelCase__ = n_head
        # Used in the tests to check the size of the first hidden state
        lowerCAmelCase__ = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            lowerCAmelCase__ = self.num_hidden_layers + 2

    def __snake_case ( self : List[str] ):
        # Build random input tensors, optional masks/labels, and a tiny config.
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        if self.use_token_type_ids:
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ):
        # Check TFFunnelModel output shapes with dict, list, and keyword inputs,
        # then again with truncate_seq / separate_cls toggled off.
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )

    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
        # Check TFFunnelBaseModel: pooled output length differs with config flags.
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )

    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , ):
        # Check TFFunnelForPreTraining logits shape.
        lowerCAmelCase__ = TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , ):
        # Check TFFunnelForMaskedLM logits shape (vocab-sized last dim).
        lowerCAmelCase__ = TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , ):
        # Check TFFunnelForSequenceClassification logits shape.
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        # Check TFFunnelForMultipleChoice: inputs are tiled across num_choices.
        lowerCAmelCase__ = self.num_choices
        lowerCAmelCase__ = TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , ):
        # Check TFFunnelForTokenClassification logits shape (per-token labels).
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , ):
        # Check TFFunnelForQuestionAnswering start/end logits shapes.
        lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __snake_case ( self : Union[str, Any] ):
        # Repackage prepare_config_and_inputs() output into (config, inputs_dict)
        # for the common-test mixin.
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) ,
        ) = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Common-model-test suite wired up for the full TF Funnel models.

    NOTE(review): the class attributes all share the obfuscated name
    ``UpperCamelCase_`` (only the last assignment survives), the test methods
    are all named ``__snake_case`` (only the last survives), and they call
    helper names such as ``self.model_tester.prepare_config_and_inputs`` that
    the obfuscated tester class does not expose -- restore the original
    names before relying on this suite.
    """

    # Models under test (would be `all_model_classes` in the original).
    UpperCamelCase_ :Tuple = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline mixin.
    UpperCamelCase_ :Optional[int] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ :Dict = False
    UpperCamelCase_ :Tuple = False

    def __snake_case ( self : int ):
        # setUp: build the tester and a ConfigTester.
        lowerCAmelCase__ = TFFunnelModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : str ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def __snake_case ( self : int ):
        # Shape checks for the bare model.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        # Shape checks for the pretraining head.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        # Shape checks for the masked-LM head.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Tuple ):
        # Shape checks for the token-classification head.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] ):
        # Shape checks for the question-answering head.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@require_tf
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """Common-model-test suite for the TF Funnel *base* (encoder-only) variants.

    NOTE(review): same obfuscation issues as the suite above -- colliding
    ``UpperCamelCase_`` attributes and ``__snake_case`` method names, plus
    calls to tester helpers that no longer exist under those names.
    """

    # Base-model classes under test.
    UpperCamelCase_ :str = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    UpperCamelCase_ :Optional[Any] = False
    UpperCamelCase_ :Any = False

    def __snake_case ( self : Union[str, Any] ):
        # setUp: tester is built with base=True so the decoder layers are skipped.
        lowerCAmelCase__ = TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Any ):
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    def __snake_case ( self : Optional[Any] ):
        # Shape checks for the base model.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        # Shape checks for the sequence-classification head.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[str] ):
        # Shape checks for the multiple-choice head.
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
| 668 | 1 |
def lowerCAmelCase_ (density : float , bulk_modulus : float ) -> float:
    """Speed of sound in a fluid, c = sqrt(K / rho) (Newton–Laplace).

    NOTE(review): both parameters were declared `lowercase__` (a SyntaxError)
    while the body validated `density` / `bulk_modulus`; renamed to match.

    :param density: fluid density (must be > 0)
    :param bulk_modulus: fluid bulk modulus (must be > 0)
    :raises ValueError: for non-positive density or bulk modulus
    """
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# NOTE(review): the module body reads FeatureDict / ModelOutput /
# PICO_TO_ANGSTROM, but the original bound every value only to the throwaway
# name _UpperCAmelCase; bind the real names as well (old binding kept).
_UpperCAmelCase : int = Mapping[str, np.ndarray]
FeatureDict = Mapping[str, np.ndarray]
_UpperCAmelCase : Optional[Any] = Mapping[str, Any]  # Is a nested dict.
ModelOutput = Mapping[str, Any]
_UpperCAmelCase : Optional[Any] = 0.01
PICO_TO_ANGSTROM = 0.01  # ProteinNet stores picometres; PDB wants ångströms.
@dataclasses.dataclass(frozen=True )
class lowerCAmelCase_ :
    """Protein structure representation.

    NOTE(review): every field was declared under the single duplicated name
    `UpperCamelCase_` (each annotation overwriting the last) and the frozen
    flag referenced an undefined name; field names restored from the
    keyword constructions and attribute reads in the rest of this module.
    """

    # Cartesian coordinates of atoms (ångströms).
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def lowerCAmelCase_ (lowercase__ : str ) -> "Protein":
    """Parse a ProteinNet-format string into a `Protein` (N/CA/C atoms only).

    NOTE(review): the original dropped the parsed coordinates and mask into
    throwaway locals (the arrays stayed all-zero) and mapped the *input
    string* over the coordinate tokens; the indexed writes and the `float`
    conversion are restored.  Assumes residue_constants.atom_order maps atom
    names to column indices — confirm.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re , lowercase__ ) if len(tag ) > 0]
    groups = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Residues outside the alphabet become "X" (restype_order.get below
            # already defaults unknown symbols to restype_num, i.e. "X").
            seq = "".join(res if res in residue_constants.restypes else "X" for res in seq )
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            # ProteinNet coordinates are picometres; convert to ångströms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            # Masked-out residues zero every atom of that residue.
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
def lowerCAmelCase_ (lowercase__ : Protein , lowercase__ : int = 0 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = prot.remark
if remark is not None:
pdb_headers.append(f'REMARK {remark}' )
lowerCAmelCase__ = prot.parents
lowerCAmelCase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowerCAmelCase__ = [p for i, p in zip(lowercase__ , lowercase__ ) if i == chain_id]
if parents is None or len(lowercase__ ) == 0:
lowerCAmelCase__ = ['''N/A''']
pdb_headers.append(f'PARENT {" ".join(lowercase__ )}' )
return pdb_headers
def lowerCAmelCase_ (prot : "Protein" , pdb_str : str ) -> str:
    """Insert REMARK/PARENT header lines into an existing PDB string, emitting
    one PARENT record per chain.

    NOTE(review): both parameters were declared `lowercase__` (a SyntaxError)
    while the body read `prot` / `pdb_str`; parameters renamed to match, and
    the per-chain dictionary keys/max (which used the parameter instead of
    the loop variables) are restored.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n" )

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f'REMARK {remark}' )

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index (keys are stringified ints).
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(i ) , [] )
                parent_dict[str(i )].append(p )

            max_idx = max([int(chain_idx ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                chain_parents = parent_dict.get(str(i ) , ["N/A"] )
                parents_per_chain.append(chain_parents )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p : Sequence[str] ) -> str:
        return f'PARENT {" ".join(p )}'

    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )

    chain_counter = 0
    for i, l in enumerate(lines ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l )
        # A TER record not followed by END starts the next chain: emit that
        # chain's PARENT line.
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain ):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents ) )

    return "\n".join(out_pdb_lines )
def lowerCAmelCase_ (prot : "Protein" ) -> str:
    """Render a `Protein` to a PDB-format string (headers, ATOM/TER, END).

    NOTE(review): the body read several names it never bound (`prot`, the
    helper's `r`, the residue count) and iterated `range(<Protein>)`; the
    locals are restored so records are actually emitted per residue/atom.
    `np.intaa`/`get_pdb_headers` kept as corrected/written in this module —
    confirm `get_pdb_headers` resolves at runtime.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_atoa(r : int ) -> str:
        # 1-letter index -> 3-letter residue name; unknown indices become UNK.
        return residue_constants.restype_atoa.get(restypes[r] , "UNK" )

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32 )  # was np.intaa (no such dtype)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError("Invalid aatypes." )

    headers = get_pdb_headers(prot )
    if len(headers ) > 0:
        pdb_lines.extend(headers )

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n ):
        res_name_a = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(atom_types , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name ) == 4 else f' {atom_name}'
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                f'{res_name_a:>3} {chain_tag:>1}'
                f'{residue_index[i]:>4}{insertion_code:>1}   '
                f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                f'{occupancy:>6.2f}{b_factor:>6.2f}          '
                f'{element:>2}{charge:>2}'
            )
            pdb_lines.append(atom_line )
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f'{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(chain_termination_line )
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot , prev_chain_index ) )

    pdb_lines.append("END" )
    pdb_lines.append("" )
    return "\n".join(pdb_lines )
def lowerCAmelCase_ (prot : "Protein" ) -> np.ndarray:
    """Return the ideal (sequence-determined) atom mask for each residue.

    NOTE(review): the parameter was declared `lowercase__` while the body read
    the undefined `prot`; parameter renamed to match.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowerCAmelCase_ (features : "FeatureDict" , result : "ModelOutput" , b_factors : Optional[np.ndarray] = None , chain_index : Optional[np.ndarray] = None , remark : Optional[str] = None , parents : Optional[Sequence[str]] = None , parents_chain_index : Optional[Sequence[int]] = None , ) -> "Protein":
    """Assemble a `Protein` from model input features and prediction output.

    NOTE(review): every parameter was declared `lowercase__` (a SyntaxError)
    while the body read `features` / `result` / `b_factors`; parameters
    renamed to match the keyword arguments forwarded to `Protein` (alias
    annotations quoted — they are not bound under those names here).

    `residue_index` is shifted +1 because PDB residue numbering is 1-based;
    missing B-factors default to zeros shaped like the final atom mask.
    """
    return Protein(
        aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
| 668 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCAmelCase : str = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCAmelCase_ (lowercase__ : Tuple ) -> str:
    """pytest hook: register the shared diffusers command-line options on the
    given parser."""
    from diffusers.utils.testing_utils import pytest_addoption_shared as _add_shared_options

    _add_shared_options(lowercase__ )
def lowerCAmelCase_ (terminalreporter : Union[str, Any] ) -> List[Any]:
    """pytest_terminalsummary hook: write the shared report files when
    --make-reports is set.

    NOTE(review): the body read an undefined `terminalreporter` (the parameter
    was `lowercase__` — pytest passes hook args by name, so the rename is also
    required for the hook to fire) and passed the reporter itself as the
    report id; use the option value instead.
    """
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        # Imported lazily so merely loading this conftest does not need diffusers.
        from diffusers.utils.testing_utils import pytest_terminal_summary_main

        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 668 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCAmelCase : Optional[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCAmelCase_ (config : Union[str, Any] ) -> List[str]:
    """pytest_configure hook: register the custom markers used by the suite.

    NOTE(review): the body read `config` but the parameter was named
    `lowercase__`; renamed (pytest also passes the hook argument as `config`).
    """
    config.addinivalue_line(
        "markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
    config.addinivalue_line(
        "markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
    config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
    config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
    config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
    config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Optional[int]:
    """pytest hook: register the shared transformers command-line options on
    the given parser."""
    from transformers.testing_utils import pytest_addoption_shared as _add_shared_options

    _add_shared_options(lowercase__ )
def lowerCAmelCase_ (terminalreporter : Any ) -> Optional[int]:
    """pytest_terminalsummary hook: write the shared report files when
    --make-reports is set.

    NOTE(review): the body read an undefined `terminalreporter` (the parameter
    was `lowercase__` — pytest passes hook args by name, so the rename is also
    required for the hook to fire) and passed the reporter itself as the
    report id; use the option value instead.
    """
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        # Imported lazily so merely loading this conftest does not need the helper.
        from transformers.testing_utils import pytest_terminal_summary_main

        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def lowerCAmelCase_ (session : List[Any] , exitstatus : int ) -> int:
    """pytest_sessionfinish hook: treat "no tests collected" (exit code 5) as
    success.

    NOTE(review): both parameters were declared `lowercase__` (a SyntaxError)
    and the zero was assigned to a throwaway local; write it back to the
    session object so the exit status actually changes.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
_UpperCAmelCase : Any = doctest.register_optionflag("IGNORE_RESULT")
_UpperCAmelCase : Dict = doctest.OutputChecker
class lowerCAmelCase_ ( snake_case__ ):
    """Doctest output checker honouring the custom IGNORE_RESULT option flag.

    NOTE(review): the three check parameters were all declared
    SCREAMING_SNAKE_CASE_ (a SyntaxError) and `optionflags` was read unbound;
    restored doctest's want/got/optionflags parameter names.
    """

    def __snake_case ( self : Dict , want : Optional[Any] , got : Union[str, Any] , optionflags : Optional[Any] ):
        # With IGNORE_RESULT set, accept any output for the example.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
# NOTE(review): `CustomOutputChecker` is not defined in this module — the
# IGNORE_RESULT-aware checker above is bound to `lowerCAmelCase_`; reference
# that instead so this assignment no longer raises NameError.  The bindings
# still only rebind a throwaway name; presumably these were meant to patch
# doctest/_pytest — confirm intent before wiring that up.
_UpperCAmelCase : Union[str, Any] = lowerCAmelCase_
_UpperCAmelCase : Dict = HfDoctestModule
_UpperCAmelCase : List[str] = HfDocTestParser
| 668 | 1 |
from __future__ import annotations
def lowerCAmelCase_ (lowercase__ : list[float] ) -> bool:
    """Return True iff the given side lengths can close into a polygon
    (the longest side must be shorter than the sum of the rest).

    NOTE(review): the body read undefined names `nums` / `copy_nums`; it now
    operates on the actual parameter (copied via sorted(), so the input list
    is not mutated, matching the original copy-then-sort intent).

    :raises ValueError: for fewer than two sides or non-positive lengths
    """
    if len(lowercase__ ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(side <= 0 for side in lowercase__ ):
        raise ValueError("All values must be greater than 0" )
    ordered = sorted(lowercase__ )
    return ordered[-1] < sum(ordered[:-1] )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 668 |
def lowerCAmelCase_ (lowercase__ : list ) -> list:
    """Odd-even transposition sort: sorts `lowercase__` in place, returns it.

    NOTE(review): the body read undefined names (`arr_size`, `arr`) and
    iterated `range(<list>)`; the locals are bound to the actual parameter.
    """
    arr_size = len(lowercase__ )
    for phase in range(arr_size ):
        # Even phases compare pairs (0,1),(2,3)…; odd phases (1,2),(3,4)…
        for i in range(phase % 2 , arr_size - 1 , 2 ):
            if lowercase__[i + 1] < lowercase__[i]:
                lowercase__[i], lowercase__[i + 1] = lowercase__[i + 1], lowercase__[i]
    return lowercase__
if __name__ == "__main__":
    # NOTE(review): the original printed via the undefined names `arr` and
    # `odd_even_transposition`; use the bound constant and the sort above.
    _UpperCAmelCase = list(range(10, 0, -1))
    print(F'''Original: {_UpperCAmelCase}. Sorted: {lowerCAmelCase_(_UpperCAmelCase)}''')
| 668 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
    """Fixture holding the GLPN image-processor hyper-parameters under test.

    NOTE(review): __init__ declared every parameter under one duplicated name
    (a SyntaxError) and bound the values to throwaway locals instead of
    instance attributes; and the dict helper was defined under a name-mangled
    identifier although callers use `prepare_image_processor_dict()`.  Names
    restored from those usages.
    """

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict( self ):
        # Kwargs used to construct the GLPNImageProcessor under test.
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """GLPN image-processor tests over PIL / numpy / torch inputs.

    NOTE(review): every method shared the single name-mangled identifier
    `__snake_case` (each def shadowing the last, so unittest collected
    nothing), fixtures were dropped into throwaway locals while the code
    reads self.image_processor_tester / self.image_processor_dict /
    self.image_processing_class, and asserts referenced undefined names.
    Conventional names restored from those reads.
    """

    # Read below as self.image_processing_class.
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = GLPNImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size_divisor" ) )
        self.assertTrue(hasattr(image_processing , "resample" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )

    def test_batch_feature( self ):
        # Intentionally empty — GLPNImageProcessor does not support batching.
        pass

    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )

    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 668 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase_ ( snake_case__ ):
    """Builds DistilBert configs/inputs and runs per-head shape checks.

    NOTE(review): every __init__/helper parameter was declared under a single
    duplicated name (a SyntaxError), values were bound to throwaway locals
    instead of attributes, and helpers returned/used names they never
    defined.  Parameter, attribute and method names restored from the
    attribute reads and `create_and_check_*` call sites in the test class
    below.
    """

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        # Random ids/masks/labels shaped by the hyper-parameters above.
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        # Repeat each example once per choice: [batch, seq] -> [batch, choices, seq].
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard model-tester suite for the PyTorch DistilBert heads.

    NOTE(review): the class listed the same (undefined) base twice — a
    TypeError even if it resolved; the mixins imported at the top of the
    file are used instead.  All methods shared the name-mangled identifier
    `__snake_case` (later defs shadowed earlier ones) and the class
    attributes were bound to the duplicated `UpperCamelCase_`; conventional
    names restored (self.all_model_classes is read below — confirm the
    boolean flag names against the mixin).
    """

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp( self ):
        # Sibling tests read self.model_tester / self.config_tester.
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config )

            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test comparing a slice of the base model's output
    against known-good values.

    NOTE(review): the method was name-mangled and every intermediate was
    bound to a throwaway local while undefined names were passed/asserted;
    locals restored so the forward pass and checks actually run.
    """

    @slow
    def test_inference_no_head_absolute_embedding( self ):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 668 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# Checkpoint name -> URL of its hosted config.json.
_UpperCAmelCase : Tuple = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCAmelCase_ ( snake_case__ ):
    """Configuration class for LeViT models.

    NOTE(review): __init__ declared every parameter under one duplicated name
    (a SyntaxError) and bound the values to throwaway locals; parameter names
    restored from the attributes the body assigns.  The class attribute was
    bound to a throwaway name; `model_type` presumed (AutoConfig dispatch) —
    confirm against the base config class.
    """

    model_type = "levit"

    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ("Subsample") stage descriptors between the 3 stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class lowerCAmelCase_(OnnxConfig):
    """ONNX export configuration for LeViT.

    Fixes: the original declared two properties with the same mangled name
    (the second silently shadowed the first — dead code) under an undefined
    base class.  Restored to the conventional ``OnnxConfig`` interface
    (``inputs`` / ``atol_for_validation``) using the ``OnnxConfig`` and
    ``version`` imports at the top of the file.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single 4-D pixel input with fully dynamic axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 668 |
from typing import Any
def lowerCAmelCase_(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Run the Viterbi algorithm and return the most likely hidden-state path.

    Fix: the signature reused one mangled name for all five parameters (a
    SyntaxError); the intended names are restored from the references inside
    the body.

    Args:
        observations_space: ordered list of observed symbols.
        states_space: list of hidden-state names.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(transition)}.
        emission_probabilities: state -> {observation -> P(emission)}.

    Returns:
        The most probable hidden-state sequence, one state per observation.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # probabilities[(state, obs)] = best path probability ending in `state`
    # at that observation; pointers holds the argmax backtrace.
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Forward pass: fill transition probabilities and backtrace pointers.
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # argmax over predecessor states for the probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Backward pass: follow pointers from the last state to the first.
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all Viterbi inputs; raise ValueError on the first problem.

    Fixes: the def name was mangled while the algorithm above calls
    ``_validation``, and the signature reused one mangled parameter name —
    a SyntaxError.  Both are restored.
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any of the five Viterbi inputs is empty/falsy.

    Fixes: restored the name called by ``_validation`` and the five distinct
    parameter names (the mangled signature repeated one name — a
    SyntaxError); dropped the needless list wrapper inside ``all``.
    """
    if not all(
        (
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        )
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings.

    Fixes: restored the name called by ``_validation`` and the two distinct
    parameter names (the mangled signature repeated one name — a SyntaxError).
    """
    _validate_list(observations_space, 'observations_space')
    _validate_list(states_space, 'states_space')
def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a list of strings.

    Fixes: restored the name called by ``_validate_lists`` and the two
    distinct parameter names (the mangled signature repeated one name — a
    SyntaxError); the body already referenced ``_object``/``var_name``.
    """
    if not isinstance(_object, list):
        raise ValueError(f'{var_name} must be a list')
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f'{var_name} must be a list of strings')
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables.

    Fixes: restored the name called by ``_validation`` and the three distinct
    parameter names (the mangled signature repeated one name — a
    SyntaxError).  The mangled third argument of the first call is restored
    as ``float`` — the value type of a flat probability table.
    """
    _validate_dict(initial_probabilities, 'initial_probabilities', float)
    _validate_nested_dict(transition_probabilities, 'transition_probabilities')
    _validate_nested_dict(emission_probabilities, 'emission_probabilities')
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check a dict of dicts whose inner values must be floats.

    Fixes: restored the name called by ``_validate_dicts`` and the two
    distinct parameter names (the mangled signature repeated one name — a
    SyntaxError); the body already referenced ``_object``.
    """
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        # inner tables are flat {str: float} dicts; nested=True adjusts the message
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless ``_object`` is a dict with str keys and
    ``value_type`` values.

    Fixes: restored the name called by the other validators and the distinct
    parameter names (the mangled signature repeated one name — a
    SyntaxError); the membership/type checks are restored from the error
    messages the body builds.
    """
    if not isinstance(_object, dict):
        raise ValueError(f'{var_name} must be a dict')
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f'{var_name} all keys must be strings')
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = 'nested dictionary ' if nested else ''
        raise ValueError(f'{var_name} {nested_text}all values must be {value_type.__name__}')
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 668 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Probe for torch; the flag below records whether it is importable.
try:
    import torch
    _UpperCAmelCase : List[Any] = True
except ImportError:
    _UpperCAmelCase : str = False
# Locate the torch cache directory via torch.hub, or fall back to the
# TORCH_HOME / XDG_CACHE_HOME environment variables.
try:
    from torch.hub import _get_torch_home
    _UpperCAmelCase : List[str] = _get_torch_home()
except ImportError:
    _UpperCAmelCase : Union[str, Any] = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
# Cache locations, bucket endpoints, and data-file paths for pretrained files.
# NOTE(review): every assignment here binds the same mangled name, yet later
# lines read torch_cache_home / PATH / default_cache_path /
# PYTORCH_PRETRAINED_BERT_CACHE / PYTORCH_TRANSFORMERS_CACHE — the original
# constant names were clearly distinct; restore them when unmangling.
_UpperCAmelCase : int = os.path.join(torch_cache_home, "transformers")
_UpperCAmelCase : int = "https://cdn.huggingface.co"
_UpperCAmelCase : Any = "https://s3.amazonaws.com/models.huggingface.co/bert"
_UpperCAmelCase : Dict = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
_UpperCAmelCase : Any = os.path.join(PATH, "config.yaml")
_UpperCAmelCase : List[str] = os.path.join(PATH, "attributes.txt")
_UpperCAmelCase : Any = os.path.join(PATH, "objects.txt")
_UpperCAmelCase : str = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
_UpperCAmelCase : List[str] = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
_UpperCAmelCase : List[str] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
_UpperCAmelCase : Optional[Any] = "pytorch_model.bin"
_UpperCAmelCase : int = "config.yaml"
def lowerCAmelCase_(objs_path=None, attrs_path=None) -> Any:
    """Load Visual-Genome class and attribute vocabularies from text files.

    Each line is comma-separated; only the first field is kept, lower-cased
    and stripped.

    Fix: the signature reused one mangled parameter name twice (a
    SyntaxError).  The defaults now resolve lazily to the module-level
    OBJECTS/ATTRIBUTES paths — behaviorally identical for default calls, and
    usable with explicit paths even when the constants are absent.

    Returns:
        (vg_classes, vg_attrs): the two vocabulary lists.
    """
    if objs_path is None:
        objs_path = OBJECTS
    if attrs_path is None:
        attrs_path = ATTRIBUTES
    vg_classes = []
    with open(objs_path) as f:
        for line in f.readlines():
            vg_classes.append(line.split(',')[0].lower().strip())
    vg_attrs = []
    with open(attrs_path) as f:
        for line in f.readlines():
            vg_attrs.append(line.split(',')[0].lower().strip())
    return vg_classes, vg_attrs
def lowerCAmelCase_(lowercase__: List[str]) -> OrderedDict:
    """Load a pickled checkpoint and return its 'model' dict as an
    OrderedDict of torch tensors.

    numpy arrays are converted with ``torch.tensor``; any other value must
    already be a tensor.

    Fix: the type check used ``torch.tensor`` (a factory *function*) where
    the *type* ``torch.Tensor`` was intended — ``isinstance`` against a
    function raises TypeError instead of validating.
    """
    r = OrderedDict()
    with open(lowercase__, 'rb') as f:
        ckp = pkl.load(f)['model']
    # list() snapshots the keys so we can pop entries while iterating.
    for k in list(ckp.keys()):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """Nested attribute-style view over a configuration dictionary.

    Sub-dictionaries become child ``Config`` objects, so ``cfg.model.name``
    style access works; values are deep-copied on construction and each key
    is also exposed under an upper-cased alias.

    Fixes: the class name was mangled although the code below refers to
    ``Config`` / ``Config.load_yaml`` explicitly, and several methods reused
    one mangled parameter name — a SyntaxError.  Names are restored from the
    call sites and the attribute references in the bodies.
    """

    # Class-level fallback so __setattr__ can read self._pointer during __init__.
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = 'root', level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                # Nested dicts become child Config nodes one level deeper.
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        # Store under both the key and its upper-cased alias.
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split('.')
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            # Dotted keys walk/update the nested Config tree.
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), '.'.join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        """Return the underlying (possibly nested) mapping."""
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f'{file_name}', 'w') as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f'{file_name}', 'w') as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = ' '
        if self._name != "root":
            r = f'{t * (self._level-1)}{self._name}:\n'
        else:
            r = ''
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f'{t * (self._level)}{v}\n'
                self._level += 1
            else:
                r += f'{t * (self._level)}{k}: {v} ({type(v).__name__})\n'
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        # NOTE(review): pop defaults were mangled; None/False per key follows
        # the standard transformers download-kwargs contract — confirm upstream.
        cache_dir = kwargs.pop('cache_dir', None)
        force_download = kwargs.pop('force_download', False)
        resume_download = kwargs.pop('resume_download', False)
        proxies = kwargs.pop('proxies', None)
        local_files_only = kwargs.pop('local_files_only', False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file_content = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print('loading configuration file from path')
        else:
            print('loading configuration file cache')
        return Config.load_yaml(resolved_config_file), kwargs
def lowerCAmelCase_(in_tensor) -> None:
    """Debug helper: compare ``in_tensor`` against the tensor saved in dump.pt.

    Prints shapes/slices of both, asserts element-wise closeness, then raises
    an Exception on success — an intentional hard stop for interactive
    debugging, kept as-is.

    Fix: the parameter name was mangled while the body still referenced
    ``in_tensor`` (NameError); restored from the body.  The second print now
    shows the loaded tensor rather than repeating the input.
    """
    out_tensor = torch.load('dump.pt', map_location=in_tensor.device)
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape, na[0, 0, :5])
    print(nb.shape, nb[0, 0, :5])
    assert np.allclose(na, nb, rtol=0.01, atol=0.1), (
        f'{sum([1 for x in np.isclose(na, nb, rtol=0.01, atol=0.1).flatten() if x is False])/len(na.flatten())*1_00:.4f} %'
        " element-wise mismatch"
    )
    raise Exception('tensors are all good')
def is_remote_url(url_or_filename: str) -> bool:
    """Return True iff the argument parses as an http(s) URL.

    Fix: restored the name ``is_remote_url`` that later code in this file
    calls (``get_config_dict`` and ``cached_path``); the def had been
    mangled so those calls could not resolve.
    """
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ('http', 'https')
def hf_bucket_url(model_id: str, filename: str, use_cdn: bool = True) -> str:
    """Build the download URL for a model file on the HF bucket or CDN.

    Fixes: restored the name called by ``get_config_dict`` above, the three
    distinct parameter names (the mangled signature repeated one name — a
    SyntaxError), and the ``{filename}`` interpolation that had been
    clobbered to a placeholder in both f-strings.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    # Legacy (un-namespaced) model ids use a dash separator instead of a path.
    legacy_format = '/' not in model_id
    if legacy_format:
        return f'{endpoint}/{model_id}-{filename}'
    else:
        return f'{endpoint}/{model_id}/{filename}'
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None) -> None:
    """Stream ``url`` into the open binary file ``temp_file`` with a tqdm bar.

    Fixes: restored the name called by ``get_from_cache`` below and the five
    distinct parameter names (the mangled signature repeated one name — a
    SyntaxError).  NOTE(review): the mangled keyword targets (``stream=True``,
    the ``Range`` header) are restored per the standard requests resumable
    download pattern — confirm against upstream.
    """
    ua = 'python/{}'.format(sys.version.split()[0])
    if _torch_available:
        ua += '; torch/{}'.format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += '; ' + '; '.join('{}/{}'.format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += '; ' + user_agent
    headers = {'user-agent': ua}
    if resume_size > 0:
        # Ask the server to resume from the bytes we already have.
        headers['Range'] = 'bytes=%d-' % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('Content-Length')
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit='B',
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc='Downloading',
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Resolve ``url`` to a locally cached file, downloading when necessary.

    Returns the cache path, or None when offline and nothing usable is
    cached.

    Fixes: restored the name called by ``cached_path`` below and the eight
    distinct parameter names (the mangled signature repeated one name — a
    SyntaxError).  NOTE(review): mangled keyword targets (``exist_ok=True``,
    ``allow_redirects=True``, ``timeout=etag_timeout``, ``delete=False``)
    are restored per the standard HF caching pattern — confirm upstream.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get('ETag')
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + '.*')
                if not file.endswith('.json') and not file.endswith('.lock')
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        'Cannot find the requested files in the cached path and outgoing traffic has been'
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        ' to False.'
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + '.lock'
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + '.incomplete'

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, 'a+b') as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            # NOTE: print() with %s-style extra args (kept from the original);
            # the placeholders are not interpolated by print().
            print(
                '%s not found in cache or force_download set to True, downloading to %s', url, temp_file.name,
            )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {'url': url, 'etag': etag}
        meta_path = cache_path + '.json'
        with open(meta_path, 'w') as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None) -> str:
    """Deterministic cache filename: sha256(url), plus '.'+sha256(etag) when
    an etag is given, preserving a '.h5' suffix so frameworks can sniff it.

    Fixes: restored the name called by ``get_from_cache`` above and the two
    distinct parameter names (the mangled signature repeated one name — a
    SyntaxError).  The mangled module-level hash import is bypassed with a
    local hashlib import.
    """
    from hashlib import sha256  # local import: the module-level alias was mangled

    url_bytes = url.encode('utf-8')
    filename = sha256(url_bytes).hexdigest()
    if etag:
        etag_bytes = etag.encode('utf-8')
        filename += '.' + sha256(etag_bytes).hexdigest()
    if url.endswith('.h5'):
        filename += '.h5'
    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Resolve a URL or local path to a local file; optionally extract archives.

    Fixes: restored the name called by ``get_config_dict`` above and the nine
    distinct parameter names (the mangled signature repeated one name — a
    SyntaxError); keyword targets follow the body's own downstream calls.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == '':
        # File, but it doesn't exist.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    else:
        # Something unknown
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace('.', '-') + '-extracted'
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + '.lock'
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, 'r') as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path))
        return output_path_extracted
    return output_path
def lowerCAmelCase_(query, delim=',') -> Any:
    """Fetch data from a local file path or a URL.

    Fixes: restored the two distinct parameter names (the mangled signature
    repeated one name — a SyntaxError), scoped the request handling to the
    URL branch (``req`` was referenced where it could be unbound), and called
    ``.json()`` on the response object rather than the ``requests`` module.
    """
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            # SECURITY: eval() on file contents — only use with trusted data.
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, 'could not connect'
        try:
            # SECURITY: eval() on a remote payload — only use with trusted endpoints.
            data = eval(data)
        except Exception:
            data = data.split('\n')
        req.close()
    return data
def get_image_from_url(url):
    """Download an image over HTTP and decode it into a numpy array.

    Fix: restored the name ``get_image_from_url`` that the tensorize helper
    below calls; the def had been mangled so the call could not resolve.
    """
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def lowerCAmelCase_(url) -> dict:
    """Download (if not already present in cwd) a pickled FRCNN checkpoint and
    convert its weights to torch tensors, adding a zero
    ``num_batches_tracked`` entry next to each ``running_var`` key (as torch
    batch-norm state_dicts expect).

    Fixes: the parameter name was mangled while the body referenced ``url``
    (NameError), and the pickle was opened via the URL string instead of the
    downloaded local file name ``fn``.
    """
    fn = url.split('/')[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, 'rb') as stream:
        weights = pkl.load(stream)
    model = weights.pop('model')
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if 'running_var' in k:
            zero = torch.tensor([0])
            k2 = k.replace('running_var', 'num_batches_tracked')
            new[k2] = zero
    return new
def lowerCAmelCase_() -> None:
    """Print the absolute path of the demo notebook next to this package.

    Fix: the body referenced a mangled undefined name inside os.path.join;
    the module-level ``PATH`` constant (this file's directory) is the only
    path in scope that makes the expression meaningful — NOTE(review):
    confirm against upstream.
    """
    print(f'{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb')
def lowerCAmelCase_(im, input_format='RGB') -> np.ndarray:
    """Read an image from a local path or URL and return it as an array.

    Fix: restored the two distinct parameter names (the mangled signature
    repeated one name — a SyntaxError); ``im`` is referenced by the body's
    assert message and ``input_format`` by the final channel check.
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f'could not connect to: {im}'
    # `cva` is this module's alias for OpenCV; convert BGR -> RGB ordering.
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == 'RGB':
        img = img[:, :, ::-1]
    return img
def lowerCAmelCase_(images, batch=1):
    """Yield successive ``batch``-sized slices of ``images`` as a generator.

    Fix: restored two distinct parameter names (the mangled signature
    repeated one name — a SyntaxError); names follow the body's usage.
    """
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 668 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module-level logger; NOTE(review): the method bodies below read `logger`,
# so this mangled binding should be restored to that name.
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase_(SequenceFeatureExtractor):
    """TVLT audio feature extractor: turns raw waveforms into padded log-mel
    spectrogram patches plus an attention mask.

    Fixes: the base class reference was mangled to an undefined name
    (``SequenceFeatureExtractor`` is the feature-extraction base imported at
    the top of the file), and the ``__init__``/``__call__`` signatures each
    reused one mangled parameter name — a SyntaxError.  Names are restored
    from the attribute assignments and references in the bodies.
    """

    model_input_names = ['audio_values', 'audio_mask']

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],  # noqa: B006 — kept for interface compatibility; read-only
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # number of frequency patches per time step
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Log-mel spectrogram in dB, shifted and clipped to roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann'),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # NOTE(review): the mangled dtype names are restored as
        # float32 (working dtype) / float64 (down-cast check) — confirm upstream.
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 668 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase_ ( snake_case__ ):
    """Builds tiny random DistilBert configs/inputs and checks output shapes per head model.

    NOTE(review): obfuscation damage — the ``__init__`` signature repeats the
    same parameter name (a SyntaxError), and every assignment rebinds the one
    local ``lowerCAmelCase__`` instead of setting ``self.parent``,
    ``self.batch_size`` etc., which later methods read. Restore the distinct
    upstream names before this class can run.
    """
    def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=13 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : str=99 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : int=5 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Tuple=37 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Any=16 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : int=None , ):
        # Tiny-model hyper-parameters (batch 13, seq len 7, hidden 32, 5 layers, 4 heads).
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = seq_length
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_input_mask
        lowerCAmelCase__ = use_token_type_ids
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = hidden_size
        lowerCAmelCase__ = num_hidden_layers
        lowerCAmelCase__ = num_attention_heads
        lowerCAmelCase__ = intermediate_size
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = hidden_dropout_prob
        lowerCAmelCase__ = attention_probs_dropout_prob
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = type_vocab_size
        lowerCAmelCase__ = type_sequence_label_size
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = num_labels
        lowerCAmelCase__ = num_choices
        lowerCAmelCase__ = scope
    # Builds random ids, optional attention mask and the three label tensors.
    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Maps the generic tester hyper-parameters onto DistilBert's config fields
    # (DistilBert uses dim/n_layers/n_heads instead of hidden_size/num_hidden_layers/...).
    def __snake_case ( self : Tuple ):
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    # Base model: checks last_hidden_state shape, with and without a mask.
    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        lowerCAmelCase__ = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Masked-LM head: logits over the full vocabulary.
    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        lowerCAmelCase__ = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # QA head: one start and one end logit per token.
    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ):
        lowerCAmelCase__ = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Sequence-classification head: one logit vector per example.
    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Token-classification head: one logit vector per token.
    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Multiple-choice head: inputs are expanded to (batch, num_choices, seq).
    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        lowerCAmelCase__ = self.num_choices
        lowerCAmelCase__ = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase__ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackages prepare_config_and_inputs() output as the common-test inputs dict.
    def __snake_case ( self : Optional[int] ):
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        ((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Common ModelTester / PipelineTester suite for the PyTorch DistilBert family.

    NOTE(review): obfuscation damage — all class attributes below rebind the
    single name ``UpperCamelCase_`` (only the last, ``True``, survives) and all
    test methods share the name ``__snake_case`` (only the final def remains
    bound). The ``self.model_tester`` / ``self.config_tester`` attributes read
    by the tests are also never set. Upstream used distinct names.
    """
    # Model classes exercised by the common tests.
    UpperCamelCase_ :Any = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # pipeline task -> model class mapping for the pipeline tests.
    UpperCamelCase_ :Union[str, Any] = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase_ :int = True
    UpperCamelCase_ :List[str] = True
    UpperCamelCase_ :List[Any] = True
    UpperCamelCase_ :Dict = True
    def __snake_case ( self : Dict ):
        lowerCAmelCase__ = DistilBertModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
    def __snake_case ( self : List[Any] ):
        self.config_tester.run_common_tests()
    # One test per head model, all delegating to the tester's checks.
    def __snake_case ( self : Dict ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : Dict ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
    # Smoke-loads the first published checkpoint.
    @slow
    def __snake_case ( self : Tuple ):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
    # Traces each model with torch.jit, round-trips it through disk and re-runs it.
    @slow
    @require_torch_gpu
    def __snake_case ( self : Any ):
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(config=SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = torch.jit.trace(
                SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
                lowerCAmelCase__ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
                loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Integration test: runs distilbert-base-uncased on a fixed 11-token batch and
    compares a 3x3 slice of the hidden states against reference values."""
    @slow
    def __snake_case ( self : str ):
        lowerCAmelCase__ = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        lowerCAmelCase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        lowerCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        # No gradients needed for an inference-only shape/value check.
        with torch.no_grad():
            lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
        lowerCAmelCase__ = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
        # Golden slice recorded from a known-good run of the checkpoint.
        lowerCAmelCase__ = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 668 |
from collections import namedtuple

# from_: factor converting one unit INTO cubic metres;
# to: factor converting cubic metres into the unit.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def lowerCAmelCase_(value: float, from_type: str, to_type: str) -> float:
    """Convert *value* from one volume unit to another via cubic metres.

    :param value: quantity expressed in ``from_type`` units
    :param from_type: source unit name (a key of ``METRIC_CONVERSION``)
    :param to_type: target unit name (a key of ``METRIC_CONVERSION``)
    :raises ValueError: if either unit name is not supported
    """
    # Fixes vs. original: the namedtuple/dict were bound to a throwaway name
    # (NameError on METRIC_CONVERSION), all three parameters shared one name
    # (SyntaxError), and the error messages joined the characters of the unit
    # string instead of the supported unit names.
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # source -> cubic metres -> target.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCAmelCase_ ( snake_case__ ):
    """Configuration for a Decision Transformer (GPT-2-style backbone over RL
    trajectories of states, actions and returns-to-go).

    Stores all hyper-parameters on the instance and forwards the bos/eos token
    ids plus any extra keyword arguments to the base config class.
    """

    # NOTE(review): parameter names are restored from the positional defaults —
    # the obfuscated original reused one duplicate name for every parameter
    # (a SyntaxError) and never stored anything on ``self``.
    UpperCamelCase_ :Dict = 'decision_transformer'
    UpperCamelCase_ :Union[str, Any] = ['past_key_values']
    UpperCamelCase_ :List[str] = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        state_dim=17,                    # dimension of the environment state vector
        act_dim=4,                       # dimension of the action vector
        hidden_size=128,
        max_ep_len=4_096,                # longest episode the timestep embedding supports
        action_tanh=True,                # squash predicted actions with tanh
        vocab_size=1,
        n_positions=1_024,
        n_layer=3,
        n_head=1,
        n_inner=None,                    # None -> backbone picks its own FFN width
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # The base class handles generic config bookkeeping (token ids, kwargs).
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 668 |
def lowerCAmelCase_(collection: list) -> list:
    """Sort *collection* in place with binary insertion sort and return it.

    For each element, a binary search finds its insertion point in the already
    sorted prefix, then the elements to the right are shifted one slot.

    Fixes vs. original: every local was collapsed onto one throwaway name, so
    the loop iterated ``range(1, <list>)`` (TypeError), never wrote back into
    ``collection`` (which the body referenced but the signature didn't define),
    and the ``__main__`` demo called an undefined ``binary_insertion_sort``.
    """
    length = len(collection)
    for i in range(1, length):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the insertion index within the sorted prefix [0, i).
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the tail of the sorted prefix right and drop val into its slot.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


# Backward-compatible alias matching the name used in the demo below.
binary_insertion_sort = lowerCAmelCase_

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 668 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_UpperCAmelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_UpperCAmelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
_UpperCAmelCase : set[int] = {ord(char) for char in VALID_CHARS}
_UpperCAmelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCAmelCase_ (lowercase__ : list[int] , lowercase__ : tuple[int, ...] ) -> str | None:
'''simple docstring'''
lowerCAmelCase__ = ""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for keychar, cipherchar in zip(cycle(lowercase__ ) , lowercase__ ):
lowerCAmelCase__ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase__ )
return decoded
def lowerCAmelCase_ (lowercase__ : list[int] ) -> list[str]:
'''simple docstring'''
lowerCAmelCase__ = []
for key in product(lowercase__ , repeat=3 ):
lowerCAmelCase__ = try_key(lowercase__ , lowercase__ )
if encoded is not None:
possibles.append(lowercase__ )
return possibles
def lowerCAmelCase_ (lowercase__ : list[str] , lowercase__ : str ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCAmelCase_ (lowercase__ : str = "p059_cipher.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = Path(lowercase__ ).parent.joinpath(lowercase__ ).read_text(encoding='''utf-8''' )
lowerCAmelCase__ = [int(lowercase__ ) for number in data.strip().split(''',''' )]
lowerCAmelCase__ = filter_valid_chars(lowercase__ )
for common_word in COMMON_WORDS:
lowerCAmelCase__ = filter_common_word(lowercase__ , lowercase__ )
if len(lowercase__ ) == 1:
break
lowerCAmelCase__ = possibles[0]
return sum(ord(lowercase__ ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 668 |
def lowerCAmelCase_(input_string: str, pattern: str) -> bool:
    """Return True if *input_string* fully matches *pattern*.

    The pattern supports ``.`` (any single character) and ``*`` (zero or more
    of the preceding element), matched with bottom-up dynamic programming.

    Fixes vs. original: both parameters shared one duplicate name
    (SyntaxError) and the DP table was bound to a throwaway local while the
    body read ``dp`` (NameError); cell updates were likewise lost.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp[i][j] == 1 iff the first i chars of input_string match the first j
    # chars of pattern.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]
    # Empty string matches empty pattern.
    dp[0][0] = 1
    # A non-empty string never matches the empty pattern.
    for i in range(1, len_string):
        dp[i][0] = 0
    # The empty string can match patterns like "a*", "a*b*": each '*' may
    # erase its preceding element.
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # Fill the rest bottom-up.
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # '*' consumes zero occurrences of its element.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # '*' consumes one more occurrence of its element.
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


# Backward-compatible alias matching the name used in the demo below.
match_pattern = lowerCAmelCase_

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    input_string = "aab"
    pattern = "c*a*b"
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 668 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
    """Builds tiny random ESM configs/inputs and checks TF model output shapes.

    NOTE(review): obfuscation damage — every assignment rebinds the single
    local ``lowerCAmelCase__`` instead of setting ``self.batch_size`` etc.,
    which later methods read, and the multi-line tuple unpacks bind nothing
    usable. Restore the distinct upstream names before running.
    """
    def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[str] , ):
        # Fixed tiny-model hyper-parameters (batch 13, seq len 7, hidden 32,
        # 2 layers, 4 heads).
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = 13
        lowerCAmelCase__ = 7
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = 99
        lowerCAmelCase__ = 32
        lowerCAmelCase__ = 2
        lowerCAmelCase__ = 4
        lowerCAmelCase__ = 37
        lowerCAmelCase__ = '''gelu'''
        lowerCAmelCase__ = 0.1
        lowerCAmelCase__ = 0.1
        lowerCAmelCase__ = 512
        lowerCAmelCase__ = 16
        lowerCAmelCase__ = 2
        lowerCAmelCase__ = 0.02
        lowerCAmelCase__ = 3
        lowerCAmelCase__ = 4
        lowerCAmelCase__ = None
    # Builds random ids, optional mask, label tensors and an EsmConfig.
    def __snake_case ( self : Tuple ):
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Extends the base inputs with encoder hidden states/mask for decoder tests.
    def __snake_case ( self : Any ):
        (
            (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) ,
        ) = self.prepare_config_and_inputs()
        lowerCAmelCase__ = True
        lowerCAmelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    # Base model: accepts dict, list and positional call styles; checks shape.
    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
        lowerCAmelCase__ = TFEsmModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Decoder-style model with cross-attention inputs.
    def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , ):
        lowerCAmelCase__ = True
        lowerCAmelCase__ = TFEsmModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ )
        # Also check the case where encoder outputs are not passed
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Masked-LM head: logits over the full vocabulary.
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ):
        lowerCAmelCase__ = TFEsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Token-classification head: one logit vector per token.
    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFEsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Repackages prepare_config_and_inputs() output as the common-test inputs dict.
    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) ,
        ) = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Common ModelTester / PipelineTester suite for the TensorFlow ESM family.

    NOTE(review): obfuscation damage — the class attributes below all rebind
    ``UpperCamelCase_`` (only the last, ``False``, survives), the test methods
    share the name ``__snake_case`` (only the final def remains bound), and
    ``self.model_tester`` / ``self.config_tester`` are never actually set.
    """
    # Model classes exercised by the common tests.
    UpperCamelCase_ :int = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # pipeline task -> model class mapping for the pipeline tests.
    UpperCamelCase_ :Any = (
        {
            'feature-extraction': TFEsmModel,
            'fill-mask': TFEsmForMaskedLM,
            'text-classification': TFEsmForSequenceClassification,
            'token-classification': TFEsmForTokenClassification,
            'zero-shot': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ :Tuple = False
    UpperCamelCase_ :Union[str, Any] = False
    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = TFEsmModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
    def __snake_case ( self : List[str] ):
        self.config_tester.run_common_tests()
    # One test per head model, delegating to the tester's checks.
    def __snake_case ( self : str ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : Optional[int] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : Dict ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
    # Smoke-loads the first published checkpoint.
    @slow
    def __snake_case ( self : int ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ = TFEsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __snake_case ( self : List[str] ):
        pass
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def __snake_case ( self : Dict ):
        pass
    # Checks input/output embedding accessors; the LM head exposes a bias dict
    # rather than an output-embedding layer.
    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                lowerCAmelCase__ = model.get_bias()
                assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                for k, v in name.items():
                    assert isinstance(SCREAMING_SNAKE_CASE_ , tf.Variable )
            else:
                lowerCAmelCase__ = model.get_output_embeddings()
                assert x is None
                lowerCAmelCase__ = model.get_bias()
                assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    """Integration tests: run the public esm2_t6_8M_UR50D checkpoint and compare
    output slices against recorded golden values."""
    # Masked-LM head: checks logits shape (vocab 33) and a 3x3 golden slice.
    @slow
    def __snake_case ( self : int ):
        lowerCAmelCase__ = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )[0]
        lowerCAmelCase__ = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , SCREAMING_SNAKE_CASE_ )
        # compare the actual values for a slice.
        lowerCAmelCase__ = tf.constant(
            [
                [
                    [8.921_518, -10.589_814, -6.4_671_307],
                    [-6.3_967_156, -13.911_377, -1.1_211_915],
                    [-7.781_247, -13.951_557, -3.740_592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    # Base model: checks a 3x3 golden slice of the hidden states.
    @slow
    def __snake_case ( self : Optional[int] ):
        lowerCAmelCase__ = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
        lowerCAmelCase__ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )[0]
        # compare the actual values for a slice.
        lowerCAmelCase__ = tf.constant(
            [
                [
                    [0.14_443_092, 0.54_125_327, 0.3_247_739],
                    [0.30_340_484, 0.00_526_676, 0.31_077_722],
                    [0.32_278_043, -0.24_987_096, 0.3_414_628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 668 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {"vocab_file": "vocab.json"}
_UpperCAmelCase : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
_UpperCAmelCase : Tuple = {"mgp-str": 27}
class lowerCAmelCase_ ( snake_case__ ):
    """Character-level tokenizer for MGP-STR: each character of the input text
    is a token, mapped through a JSON vocabulary file.

    Fixes vs. original: ``self.vocab`` / ``self.decoder`` were never assigned
    (every assignment rebound one throwaway local), ``char_tokens`` and
    ``vocab_file`` were referenced but undefined, every method shared the name
    ``__snake_case`` so only the last survived, and the vocabulary was written
    to the directory path instead of the vocab file. Canonical
    ``PreTrainedTokenizer`` hook names are restored so the base class can
    dispatch to them.
    """

    UpperCamelCase_ :Union[str, Any] = VOCAB_FILES_NAMES
    UpperCamelCase_ :Tuple = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        # token -> id mapping loaded from the JSON vocab file.
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # id -> token inverse mapping used for decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        # Base vocabulary plus any tokens added after loading.
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # One token per character of the input string.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        # Unknown characters fall back to the unk token's id.
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary JSON into *save_directory*; returns the file path."""
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        return (vocab_file,)
| 668 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.