"""Perceiver model configuration"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Perceiver model."""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
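

# --- Usage sketch (not part of the original file): instantiate the config above
# with one non-default value; the attribute names come from the __init__ body
# and the override value is illustrative.
config = PerceiverConfig(num_latents=512)
print(config.num_latents)  # 512
print(config.d_latents)    # 1280 (default)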
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case : List[Any] = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
snake_case : Tuple = {"""mobilebert-uncased""": 5_1_2}
snake_case : List[str] = {}
class UpperCamelCase__ ( a_):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = MobileBertTokenizer
def __init__( self : Optional[int] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Any="[UNK]" , UpperCamelCase_ : str="[SEP]" , UpperCamelCase_ : Any="[PAD]" , UpperCamelCase_ : Dict="[CLS]" , UpperCamelCase_ : str="[MASK]" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Dict , ):
'''simple docstring'''
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
__magic_name__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase_ ) != tokenize_chinese_chars
):
__magic_name__ = getattr(UpperCamelCase_ , normalizer_state.pop('type' ) )
__magic_name__ = do_lower_case
__magic_name__ = strip_accents
__magic_name__ = tokenize_chinese_chars
__magic_name__ = normalizer_class(**UpperCamelCase_ )
__magic_name__ = do_lower_case
def a__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any]=None ):
'''simple docstring'''
__magic_name__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__ ( self : List[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
__magic_name__ = [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
'''simple docstring'''
__magic_name__ = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
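

# --- Usage sketch (not part of the original file): load the fast tokenizer from
# the hub checkpoint referenced above and encode a sentence pair.
tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
encoded = tokenizer("hello world", "second sentence")
print(encoded["input_ids"])       # [CLS] ... [SEP] ... [SEP]
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second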
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
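

# --- Usage sketch (not part of the original file): the converter can also be
# driven directly, assuming the Tatoeba-Challenge repo has been cloned at
# DEFAULT_REPO as the skipUnless guard above requires; the save_dir is illustrative.
converter = TatoebaConverter(save_dir="converted")
converter.convert_models(["heb-eng"])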
def merge_sort(collection: list) -> list:
    """Pure Python implementation of the merge sort algorithm."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
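

# --- Usage sketch (not part of the original file): merge_sort is an O(n log n)
# divide-and-conquer sort; the values below are illustrative.
print(merge_sort([5, 2, 9, 1, 5, 6]))  # [1, 2, 5, 5, 6, 9]
print(merge_sort([]))                  # []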
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    # the backbone is a Swin Transformer
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))

    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias"))
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias"))
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight"))
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias"))

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers):
        # self-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias"))
        # cross-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias"))
        # MLP 1
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias"))
        # MLP 2
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias"))
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias"))
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias"))
        # layernorm 3 (final layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias"))

    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias"))
    for i in range(3):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight"))
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias"))
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Copy/paste/tweak the original checkpoint's weights into our MaskFormer structure."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
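

# --- Usage sketch (not part of the original script): the conversion entry point
# can also be called directly; the paths below are illustrative placeholders.
convert_maskformer_checkpoint(
    model_name="maskformer-swin-tiny-ade",
    checkpoint_path="checkpoints/maskformer_swin_tiny_ade20k/model.pkl",
    pytorch_dump_folder_path="converted/maskformer-swin-tiny-ade",
    push_to_hub=False,
)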
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it is not matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
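

# --- Worked example (not part of the original file): the classic pair from the
# Jaro-Winkler literature; 6 matches, 1 transposition, common prefix "mar".
# jaro = (6/6 + 6/6 + 5/6) / 3 = 0.9444; result = 0.9444 + 0.1 * 3 * (1 - 0.9444).
print(f"{jaro_winkler('martha', 'marhta'):.4f}")  # 0.9611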
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RetriBERT model."""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
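

# --- Usage sketch (not part of the original file): instantiate the config above
# with a non-default projection dimension (value is illustrative).
config = RetriBertConfig(projection_dim=256)
print(config.model_type)      # "retribert"
print(config.projection_dim)  # 256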
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Parameters: fill in your own input/output directories
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get images list and annotations list from the input dir,
    flip them, and save the new images and annotations to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-format label files and pair each with its image path."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image, and mirror the matching normalized bbox coordinate."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
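

# --- Worked example (not part of the original script): a horizontal flip only
# mirrors the normalized x-center of a YOLO box [class, x_c, y_c, w, h].
bbox = [0, 0.25, 0.40, 0.10, 0.20]
flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
print(flipped)  # [0, 0.75, 0.4, 0.1, 0.2]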
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    # each rank gets a distinct, contiguous slice of 1..num_processes**2
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # the main process gets one extra element so padding is actually exercised elsewhere
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # for now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # for now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # for xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
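

# --- Worked example (not part of the original file): what create_tensor yields
# per rank with 2 processes, and the values the reduce tests assert against.
num_processes = 2
per_rank = [torch.arange(num_processes) + 1.0 + num_processes * rank for rank in range(num_processes)]
print(per_rank)                        # [tensor([1., 2.]), tensor([3., 4.])]
print(torch.stack(per_rank).sum(0))    # tensor([4., 6.])  -> test_reduce_sum
print(torch.stack(per_rank).mean(0))   # tensor([2., 3.])  -> test_reduce_mean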
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product
    of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Second-order scheduler inspired by DPM-Solver-2 and Algorithm 2 of the Karras et al. (2022) paper."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scale the model input by `1 / (sigma**2 + 1) ** 0.5`."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
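

# --- Usage sketch (not part of the original file): a bare denoising loop with
# the scheduler above; `unet` is a hypothetical epsilon-prediction model and
# the shapes/step count are illustrative.
scheduler = KDPM2DiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
scheduler.set_timesteps(num_inference_steps=25)

sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = unet(model_input, t)  # hypothetical model call, returns predicted noise
    sample = scheduler.step(noise_pred, t, sample).prev_sample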
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user via the REST API."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
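

# --- Usage sketch (not part of the original file): the token below is a
# placeholder, and the printed fields depend on the authenticated account.
info = fetch_github_info("ghp_your_token_here")
print(info.get("login"), info.get("public_repos"))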
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
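

# --- Note (not part of the original file): with the lazy-module pattern above,
# importing the package is cheap; each submodule is imported on first attribute
# access, and the torch-only classes are registered only when torch is installed.
from transformers.models.yolos import YolosConfig  # triggers only configuration_yolos
config = YolosConfig()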
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Optional[Any] = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[str] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
A_ = VideoMAEConfig()
set_architecture_configs(UpperCAmelCase__, UpperCAmelCase__ )
if "finetuned" not in model_name:
A_ = False
if "finetuned" in model_name:
A_ = """huggingface/label-files"""
if "kinetics" in model_name:
A_ = 4_00
A_ = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
A_ = 1_74
A_ = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
A_ = json.load(open(hf_hub_download(UpperCAmelCase__, UpperCAmelCase__, repo_type="""dataset""" ), """r""" ) )
A_ = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if "small" in model_name:
A_ = 3_84
A_ = 15_36
A_ = 12
A_ = 16
A_ = 12
A_ = 3
A_ = 1_92
A_ = 7_68
elif "large" in model_name:
A_ = 10_24
A_ = 40_96
A_ = 24
A_ = 16
A_ = 12
A_ = 8
A_ = 5_12
A_ = 20_48
elif "huge" in model_name:
A_ = 12_80
A_ = 51_20
A_ = 32
A_ = 16
A_ = 12
A_ = 8
A_ = 6_40
A_ = 25_60
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
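# Note (added for clarity): the slicing above unpacks a fused qkv projection. A fused
# weight has shape (3 * dim, dim); val[:dim] is the query projection, val[dim : 2 * dim]
# the key projection, and val[-dim:] the value projection.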
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
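# Usage sketch (illustrative; the script name and flags below are examples, not part of
# this file):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports the training script as a module and runs its `_mp_fn` once per
# TPU core; the injected `--tpu_num_cores` flag is left for the training script to parse.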
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs)
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output
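# Minimal sanity check (a sketch; both implementations should agree up to ordering):
#
#   assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
#   # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]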
if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id)

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
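# Sketch of how these defaults surface downstream (assuming the standard transformers
# generation API; the model id below is only an example):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-de")
#   model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-de")
#   batch = tokenizer(["Machine learning is great"], return_tensors="pt")
#   out = model.generate(**batch)  # picks up num_beams/length_penalty written below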
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_dir = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(fsmt_folder_path, checkpoint_file, data_dir, archive_map=models, **kwargs)

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
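# Worked example (illustrative, not part of the original module): a README of the form
#
#   ---
#   language: en
#   ---
#   # My dataset
#
# splits into the YAML block "language: en" and the remaining body "# My dataset".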
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
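# Example session (a sketch; outputs vary because the estimators are stochastic):
#
#   pi_estimator(100_000)                       # estimate near 3.14
#   area_under_line_estimator_check(100_000)    # expected area of y=x on [0, 1] is 0.5
#   pi_estimator_using_area_under_curve(100_000)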
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
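# Worked example (illustrative): gcd(3, 11) == 1 and 3 * 4 = 12 == 1 (mod 11),
# so mod_inverse(3, 11) returns 4.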
"""simple docstring"""
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
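# Worked example (illustrative): solution(10) == 27, since 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.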
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # We use the input text to generate the mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor)
        return inpainting_pipeline(prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps)
"""simple docstring"""
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
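    # Quick sanity checks (added examples; the values follow directly from the
    # definitions above: 39 -> 27 -> 14 -> 4 and 199 -> 19 -> 10 -> 1).
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(199) == 3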
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos):
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
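# For example: a single image becomes [[image]], a flat list of frames becomes
# [frames], and an already-batched list of videos is returned unchanged.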
class VideoMAEImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
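# Hedged usage sketch (added for illustration; the concrete class name above was
# reconstructed, so treat this as a sketch rather than the canonical API):
#
#     import numpy as np
#     processor = VideoMAEImageProcessor()
#     video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
#     batch = processor(video, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)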
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
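# Worked example (added): rename_key("visual_encoder.blocks.0.norm1.weight")
# returns "vision_model.encoder.layers.0.layer_norm1.weight".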
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
if config_path is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = BlipConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE : List[str] = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
__SCREAMING_SNAKE_CASE : int = BlipForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
__SCREAMING_SNAKE_CASE : Any = blip_decoder(pretrained=__SCREAMING_SNAKE_CASE , image_size=3_84 , vit='''base''' )
__SCREAMING_SNAKE_CASE : int = pt_model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = pt_model.state_dict()
for key in modified_state_dict.copy():
__SCREAMING_SNAKE_CASE : Union[str, Any] = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[str] = rename_key(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[int] = value
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[Any] = 3_84
__SCREAMING_SNAKE_CASE : List[Any] = load_demo_image(image_size=__SCREAMING_SNAKE_CASE , device='''cpu''' )
__SCREAMING_SNAKE_CASE : List[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer(['''a picture of'''] ).input_ids
__SCREAMING_SNAKE_CASE : List[Any] = hf_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
__SCREAMING_SNAKE_CASE : List[str] = hf_model.generate(__SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__SCREAMING_SNAKE_CASE : Optional[int] = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
__SCREAMING_SNAKE_CASE : Optional[Any] = blip_vqa(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit='''base''' )
vqa_model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
__SCREAMING_SNAKE_CASE : Dict = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[str] = rename_key(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[Any] = value
__SCREAMING_SNAKE_CASE : Optional[Any] = BlipForQuestionAnswering(__SCREAMING_SNAKE_CASE )
hf_vqa_model.load_state_dict(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''How many dogs are in this image?''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids
__SCREAMING_SNAKE_CASE : Optional[int] = hf_vqa_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
__SCREAMING_SNAKE_CASE : Optional[int] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
__SCREAMING_SNAKE_CASE : Any = blip_itm(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit='''base''' )
itm_model.eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = itm_model.state_dict()
for key in modified_state_dict.copy():
__SCREAMING_SNAKE_CASE : Any = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : int = rename_key(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Tuple = value
__SCREAMING_SNAKE_CASE : Union[str, Any] = BlipForImageTextRetrieval(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[Any] = ['''A picture of a woman with a dog sitting in a beach''']
__SCREAMING_SNAKE_CASE : Any = tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding='''max_length''' , truncation=__SCREAMING_SNAKE_CASE , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__SCREAMING_SNAKE_CASE )
hf_itm_model.eval()
__SCREAMING_SNAKE_CASE : Dict = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[Any] = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
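# Worked example (added): with scores [90, 23, 6, 33, 21, 65, 123, 34423] the
# leaf pairs reduce (max) to [90, 33, 65, 34423], then (min) to [33, 65], and
# the root (max) yields 65 -- the value main() prints.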
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
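# For reference, a minimal single-process odd-even transposition sort (added
# sketch; the parallel version above distributes these same compare-exchange
# phases across one process per element):
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr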
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase__ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
'''simple docstring'''
UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
else:
UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_inpaint(self):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Any = sd_pipe(**_A ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_single_identical(self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
'''simple docstring'''
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : str = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
'''simple docstring'''
UpperCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.floataa , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    '''simple docstring'''

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class Parquet(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
lowerCamelCase = data_files
if isinstance(_a , _a ):
lowerCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
lowerCamelCase = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
lowerCamelCase = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_a ):
with open(_a , """rb""" ) as f:
lowerCamelCase = datasets.Features.from_arrow_schema(pq.read_schema(_a ) )
break
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={"""files""": files} ) )
return splits
    def _cast_table(self, pa_table):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
return pa_table
    def _generate_tables(self, files):
"""simple docstring"""
lowerCamelCase = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , """rb""" ) as f:
lowerCamelCase = pq.ParquetFile(_a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCamelCase = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(_a )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_a )}: {e}' )
raise
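# Hedged usage sketch (added; the `columns` keyword maps to ParquetConfig.columns
# defined above):
#
#     import datasets
#     ds = datasets.load_dataset("parquet", data_files={"train": "data/*.parquet"}, columns=["id", "text"])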
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
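# Sanity values (added): solution(2) == 14 (14 = 2*7 and 15 = 3*5 each have two
# distinct prime factors) and solution(3) == 644 (644, 645 and 646 each have three).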
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' ,[
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] ,)
def test_base_extractors(
    compression_format, is_archive, bza_file, gz_file, lza_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' ,[
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] ,)
def test_extractor(
    compression_format, is_archive, bza_file, gz_file, lza_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' ,[('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] ,)
def test_tar_extract_insecure_files(insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # zipfile.is_zipfile reports True because the PNG payload happens to
    # contain the ZIP end-of-central-directory signature.
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
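# Hedged usage sketch of the API exercised above (added; paths are placeholders):
#
#     fmt = Extractor.infer_extractor_format("archive.tar.gz")
#     Extractor.extract("archive.tar.gz", "output_dir", fmt)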
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
    def test_added_tokens_do_lower_case(self):
pass
    def test_add_special_tokens(self):
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
    def test_internal_consistency(self):
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
    def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
    def test_pretokenized_inputs(self):
pass
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
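    # Hedged usage sketch (added): instantiate with defaults and override a field.
    #
    #     config = BlenderbotSmallConfig(encoder_layers=6)
    #     assert config.model_type == "blenderbot-small"
    #     assert config.num_attention_heads == 16  # mapped to encoder_attention_heads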
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__a : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__a : Union[str, Any] = {0: "batch"}
__a : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a : Tuple = {0: "batch", 1: "decoder_sequence"}
__a : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__a : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
__a : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
__a : Optional[int] = {0: "batch", 2: "past_sequence + sequence"}
else:
__a : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__a : Union[str, Any] = super().outputs
else:
__a : int = super(__lowerCamelCase , self ).outputs
if self.use_past:
__a : Tuple = self.num_layers
for i in range(__lowerCamelCase ):
__a : Tuple = {0: "batch", 2: "past_sequence + sequence"}
__a : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None):
__a : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Generate decoder inputs
__a : List[str] = seq_length if not self.use_past else 1
__a : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__a : Optional[Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a : Optional[Any] = dict(**__lowerCamelCase , **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__a : Tuple = common_inputs["input_ids"].shape
__a : int = common_inputs["decoder_input_ids"].shape[1]
__a : Optional[Any] = self.num_attention_heads
__a : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a : Optional[int] = decoder_seq_length + 3
__a : Dict = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a : List[Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__lowerCamelCase , __lowerCamelCase )] , dim=1 )
__a : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a : str = self.num_layers
__a : Union[str, Any] = min(__lowerCamelCase , __lowerCamelCase )
__a : Union[str, Any] = max(__lowerCamelCase , __lowerCamelCase ) - min_num_layers
__a : str = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
__a : Optional[int] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__lowerCamelCase , __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None):
__a : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__a : int = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a : str = seqlen + 2
__a : Optional[int] = self.num_layers
__a : int = self.num_attention_heads
__a : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a : Union[str, Any] = common_inputs["attention_mask"].dtype
__a : List[str] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
__a : Tuple = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None):
__a : str = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a : List[str] = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
__a : Dict = compute_effective_axis_dimension(
__lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a : Optional[int] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a : Optional[Any] = dict(tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None):
if self.task in ["default", "seq2seq-lm"]:
__a : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
elif self.task == "causal-lm":
__a : Any = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
else:
__a : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
if self.task in ["default", "seq2seq-lm"]:
__a : Dict = super()._flatten_past_key_values_(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
__a : int = super(__lowerCamelCase , self )._flatten_past_key_values_(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
__a : Tuple = args.log_outputs
__a : Tuple = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
__a : Any = load_metric('''wer''' )
__a : int = load_metric('''cer''' )
# compute metrics
__a : Tuple = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
__a : Tuple = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
__a : List[Any] = f"WER: {wer_result}\nCER: {cer_result}"
print(lowerCAmelCase__ )
with open(f"{dataset_id}_eval_results.txt" , '''w''' ) as f:
f.write(lowerCAmelCase__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__a : str = f"log_{dataset_id}_predictions.txt"
__a : Any = f"log_{dataset_id}_targets.txt"
with open(lowerCAmelCase__ , '''w''' ) as p, open(lowerCAmelCase__ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ):
p.write(f"{i}" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(f"{i}" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCAmelCase__ , with_indices=lowerCAmelCase__ )
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
args = parser.parse_args()
main(args)
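# Example invocation (assuming this file is saved as eval.py; the model and dataset ids
# are illustrative, any ASR checkpoint loadable by the pipeline works):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs
#
# The WER/CER computation itself can be sanity-checked on toy strings:
#
#   wer = load_metric("wer")
#   assert wer.compute(references=["hello world"], predictions=["hello word"]) == 0.5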
| 326
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, avoiding
    whitespace/control characters that BPE merge files cannot represent.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
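if __name__ == "__main__":
    # Round-trip sketch (network access assumed; "facebook/bart-base" hosts the
    # vocab/merges files referenced above, the ids shown are indicative only).
    tok = BartTokenizer.from_pretrained("facebook/bart-base")
    ids = tok("Hello world!")["input_ids"]
    print(ids)  # e.g. [0, 31414, 232, 328, 2]
    print(tok.decode(ids))  # "<s>Hello world!</s>"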
| 9
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
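if __name__ == "__main__":
    # Minimal sketch of the intended workflow: declare a dataclass and let the parser
    # turn its fields into CLI flags. `ExampleArguments` is invented for illustration.
    @dataclasses.dataclass
    class ExampleArguments:
        model_name: str = HfArg(default="bert-base-uncased", help="Checkpoint to load.")
        batch_size: int = 8
        fp16: bool = False

    (example_args,) = HfArgumentParser(ExampleArguments).parse_args_into_dataclasses()
    print(example_args)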
| 9
| 1
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    """
    Masked language modeling prediction pipeline using any model with a language modeling head.
    """

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
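if __name__ == "__main__":
    # Usage sketch: the task name "fill-mask" resolves to this pipeline via the
    # `pipeline` factory (model download assumed; scores below are indicative only).
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    for candidate in unmasker("Paris is the <mask> of France.", top_k=3):
        print(candidate["token_str"], round(candidate["score"], 3))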
| 618
|
def valid_coloring(neighbours: list, colored_vertices: list, color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list, max_colors: int, colored_vertices: list, index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list, max_colors: int) -> list:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
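if __name__ == "__main__":
    # Worked example: a 4-cycle (given as an adjacency matrix) is 2-colorable, so
    # `color` returns an assignment such as [0, 1, 0, 1]; with only 1 color it fails.
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle, 2))  # e.g. [0, 1, 0, 1]
    print(color(cycle, 1))  # []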
| 618
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 457
|
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """
    Gaussian Error Linear Unit, computed exactly via the error function.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """
    Smoother tanh-based approximation of the GELU (https://arxiv.org/abs/1606.08415).
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the range of possible GeLU outputs to [-10, 10], which is useful for quantization.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: splits x into two halves along `axis` and gates the first
    half with the sigmoid of the second.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
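if __name__ == "__main__":
    # Quick usage sketch: resolve an activation by name and apply it to a tensor
    # (output values are indicative; exact numbers depend on the TF build).
    act = get_tf_activation("gelu_new")
    print(act(tf.constant([-1.0, 0.0, 1.0])).numpy())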
| 457
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__A : Dict = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 712
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 398
| 0
|
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
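# Worked example: least_divisible_repunit(7) == 6, since R(6) = 111111 = 7 * 15873
# while none of the shorter repunits (1, 11, ..., 11111) is divisible by 7.
# solution() scans odd divisors coprime to 10 until that index first exceeds the
# one-million limit (Project Euler problem 129).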
| 3
|
"""simple docstring"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """
    Helper function to read raw microphone data via ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`, optionally
    overlapped by `stride`. With `stream=True`, partial chunks are yielded as they arrive.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
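if __name__ == "__main__":
    # Decoding sketch: "sample.wav" is an illustrative stand-in for any local audio
    # file; ffmpeg must be on PATH for this to work.
    with open("sample.wav", "rb") as f:
        audio = ffmpeg_read(f.read(), sampling_rate=16_000)
    print(audio.shape, audio.dtype)  # (num_samples,) float32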
| 599
| 0
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 701
|
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    # Modified Euler (Heun) integrator: predict with a forward Euler step, then
    # correct with the trapezoidal average of the slopes.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
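    # Worked example (sketch): integrating y' = y from y(0) = 1 up to x = 1 with
    # h = 0.01 approximates e; the modified Euler scheme is second order accurate.
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approx[-1])  # ~2.7182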
| 472
| 0
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
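
# A minimal usage sketch with a hypothetical toy vocabulary. The real class is
# built from the vocab/emoji files shipped with the model, so the dicts below
# are illustrative assumptions only.
if __name__ == "__main__":
    toy_vocab = {"こん": 5, "にちは": 6, "<SP>": 1}
    toy_ids_to_tokens = {1: ["<SP>"], 5: ["こん"], 6: ["にちは"]}
    tok = SubWordJapaneseTokenizer(toy_vocab, toy_ids_to_tokens, emoji={"emoji": {}, "emoji_inv": {}})
    print(tok.tokenize("こんにちは"))  # -> ['こん', 'にちは']
    print(tok.convert_id_to_token(6))  # -> 'にちは'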
| 429
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
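
# Quick illustration of the renaming above on a toy state dict (keys made up):
# >>> sd = {"emb.weight": torch.zeros(1), "blocks.0.att.time_mix_k": torch.zeros(1), "head.weight": torch.zeros(1)}
# >>> sorted(convert_state_dict(sd).keys())
# ['head.weight', 'rwkv.blocks.0.attention.time_mix_key', 'rwkv.embeddings.weight']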
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
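
# Example invocation (script filename, repo id and checkpoint name are
# illustrative placeholders, not guaranteed to exist):
# python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-4-169m-hf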
| 429
| 1
|
from __future__ import annotations
def median_of_two_arrays(nums1: list, nums2: list) -> float:
    """Find the median of the combined, sorted contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([1.5, 3.5], [2.5, 4.5])
    3.0
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 17
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="Bit does not output attentions" )
    def test_attention_outputs(self):
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
    def test_model_common_attributes(self):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason="Bit does not use feedforward chunking" )
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
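
# To run just these tests (assuming the usual transformers checkout layout):
# python -m pytest tests/models/bit/test_modeling_bit.py -k "backbone or classification" -v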
| 17
| 1
|
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
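
# A small worked example of both transforms (values follow from the
# definitions above; note that `stdev` is the sample standard deviation):
if __name__ == "__main__":
    data = [10.0, 20.0, 30.0]
    print(normalization(data))    # -> [0.0, 0.5, 1.0]
    print(standardization(data))  # -> [-1.0, 0.0, 1.0]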
| 574
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
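
# Example invocation (script filename and paths are placeholders):
# python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./tf_ckpt --bert_config_file ./bert_config.json --pytorch_dump_path ./bert-pt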
| 574
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
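
# Illustrative usage (requires network access to fetch the public checkpoint;
# the expected ids reflect the roberta-base vocabulary):
# tok = RobertaTokenizerFast.from_pretrained("roberta-base")
# tok("Hello world")["input_ids"]  # -> [0, 31414, 232, 2]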
| 449
|
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        first_animations = []
        second_animations = []
        cpu_targs = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 449
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
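
# With the lazy module in place, a user-facing import such as
#     from transformers import LongformerModel
# only triggers the heavy modeling import on first attribute access
# (illustrative note relying on the standard `_LazyModule` behavior).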
| 391
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 391
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 467
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
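
# Outside the test suite the tool can be used directly (illustrative sketch):
# tool = load_tool("text-question-answering")
# tool.setup()
# tool(TEXT, "When was Hugging Face founded?")  # extractive answer drawn from TEXT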
| 467
| 1
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # Empty init-weights function to keep the class compatible with the library.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
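
# Minimal usage sketch (assumes timm is installed; model name and shapes are
# purely illustrative):
# config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2))
# backbone = TimmBackbone(config)
# feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps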
| 349
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
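
# Illustrative only: building ONNX dummy inputs for the text model (the
# tokenizer checkpoint below is an assumption and requires network access):
# from transformers import PerceiverTokenizer
# onnx_config = PerceiverOnnxConfig(PerceiverConfig())
# dummy = onnx_config.generate_dummy_inputs(PerceiverTokenizer.from_pretrained("deepmind/language-perceiver"))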
| 349
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
snake_case__ : Any = ["""longest""", """max_length""", """do_not_pad"""]
snake_case__ : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(_a , _a ):
snake_case__ : Optional[Any] = feat_extract(_a , padding=_a , max_length=_a , return_tensors='np' )
snake_case__ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
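

if __name__ == "__main__":
    # Usage sketch added for illustration (not part of the original test suite): run the
    # feature extractor on one second of synthetic silence at the default 16 kHz rate.
    extractor = SpeechT5FeatureExtractor()
    waveform = [0.0] * 16000
    features = extractor(waveform, sampling_rate=16000, return_tensors="pt")
    print(features.input_values.shape)  # expected: torch.Size([1, 16000])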
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    # Overwrite the default test_latents_inputs because pix2pix encodes the image differently
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")

@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
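

if __name__ == "__main__":
    # Usage sketch added for illustration (not part of the original test file); assumes a
    # CUDA device and the public "timbrooks/instruct-pix2pix" checkpoint exercised above.
    pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
        "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
    ).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
    )
    edited = pipe("turn him into a cyborg", image=image, num_inference_steps=10, image_guidance_scale=1.0).images[0]
    edited.save("cyborg.png")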
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]

def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping to the transformers (PP only) naming."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: the layer index is parsed from the shard file name
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
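

def _layer_name_mapping_examples():
    # Illustrative sketch only (added for clarity; the shard file name below is
    # hypothetical). The "- 3" offset accounts for the embedding and layernorm shards
    # that precede the first transformer block in the Megatron-DeepSpeed layout.
    assert layer_name_mapping("word_embeddings.weight", "layer_01-model_00-model_states.pt") == "word_embeddings.weight"
    assert (
        layer_name_mapping("self_attention.dense.weight", "layer_04-model_00-model_states.pt")
        == "h.1.self_attention.dense.weight"
    )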
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for file in file_names:
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
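
# Example invocation (added for illustration; paths and the script file name are
# hypothetical placeholders, not taken from the original source):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /data/bloom/megatron_ckpt \
#       --pytorch_dump_folder_path /data/bloom/hf_model \
#       --shard_model \
#       --pretraining_tp 4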
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)

        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids

        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)

        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids

        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
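

if __name__ == "__main__":
    # Usage sketch added for illustration (not part of the original test suite); requires
    # the `phonemizer` backend. The expected output string comes from test_phonemize above.
    tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    print(tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us"))  # "h ə l oʊ h aʊ ɑːɹ j uː"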
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
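
# Usage sketch (comment added for illustration only; not part of the module): with the
# _LazyModule replacement above, heavy submodules are imported only on first access:
#
#     from transformers.models.x_clip import XCLIPConfig  # cheap, no modeling import yet
#     from transformers.models.x_clip import XCLIPModel   # triggers the torch-backed import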
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Greedy fractional knapsack: sort items by value/weight ratio (descending), take
    # whole items while they fit, then a fraction of the first item that does not fit.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
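
    # Worked example (a sketch added for illustration, not in the original module):
    # items sorted by value/weight ratio are 60/10, 100/20, 120/30; capacity 50 takes
    # items 1 and 2 whole and 20/30 of item 3 -> 60 + 100 + 20 * 120 / 30 = 240.0
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0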
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __A ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = F'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 140
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the two popped values & push the result to the stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])
if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 140
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 237
|
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    """Compute the zeroed-out quantity (stress, tangential force or area) from the other two."""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
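# Editor's worked examples (sketches, not from the original file), one per
# branch of the function above:
#   shear_stress(stress=25, tangential_force=100, area=0) -> ("area", 4.0)
#   shear_stress(stress=0, tangential_force=50, area=10) -> ("stress", 5.0)
#   shear_stress(stress=2, tangential_force=0, area=10) -> ("tangential_force", 20)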
| 237
| 1
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
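# Editor's worked example of the WER formula documented above (a sketch):
# reference "this is the reference" vs. prediction "this is the prediction"
# has S=1, D=0, I=0 over N=4 reference words, so that pair alone contributes
# a WER of (1 + 0 + 0) / 4 = 0.25.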
| 421
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 421
| 1
|
'''simple docstring'''
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn-around times using Highest Response Ratio Next scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
arrival_time.sort()
while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting times from the turn-around and burst times."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
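# Editor's note on the response-ratio formula used above (a sketch): a process
# with burst time 3 that arrived at time 4 has, at current_time 10, waited
# 10 - 4 = 6 units, so its ratio is (3 + 6) / 3 = 3.0; the waiting process
# with the highest such ratio is scheduled next.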
| 716
|
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 610
| 0
|
def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
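# Editor's worked example (a sketch): for n = 10 the square of the sum is
# 55 ** 2 = 3025 and the sum of the squares is 385, so solution(10) = 2640.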
| 413
|
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers only `index` itself.
                self.tree[index] = value
            else:
                # Recompute the maximum over the range this node covers.
                # (Editor's note: reconstructed as a range recomputation, since
                # the source line was ambiguous.)
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
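# Editor's usage sketch (assumes the names fixed above; queries use a
# half-open range [left, right)):
# >>> ft = MaxFenwickTree(5)
# >>> ft.update(2, 20)
# >>> ft.query(0, 5)
# 20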
| 413
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 255
|
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: T5FilmDecoder, scheduler: DDPMScheduler, melgan: "OnnxRuntimeModel") -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
    @torch.no_grad()
    def __call__(self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype,
            )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps,
                )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info("Generated segment", i)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
| 255
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
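# Editor's usage sketch (the checkpoint name is only illustrative):
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# This merges the tokenizer's input_ids/attention_mask with the image
# processor's pixel_values, exactly as __call__ above shows.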
| 74
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
parser.add_argument(
"--config_file" ,default=_snake_case ,help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) ,)
if subparsers is not None:
        parser.set_defaults(func=test_command)
return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
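# Editor's usage note (a sketch): once installed, this subcommand is invoked as
#   accelerate test --config_file path/to/config.yaml
# which launches the bundled test_script.py through `accelerate-launch` to
# verify the distributed setup.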
| 505
| 0
|
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # This node covers only `index` itself.
                self.tree[index] = value
            else:
                # Recompute the maximum over the range this node covers.
                # (Editor's note: reconstructed as a range recomputation, since
                # the source line was ambiguous.)
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 469
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
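# Editor's worked example (a sketch): for pattern "ABABX" the failure array is
# [0, 0, 1, 2, 0] (failure[3] = 2 because "AB" is both a prefix and a suffix
# of "ABAB"), so after a mismatch the search resumes at that prefix length
# instead of restarting from zero.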
| 469
| 1
|
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text to a cipher/key pair using pseudo-random numbers."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt a cipher/key pair back to text."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
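# Editor's note (a sketch of the algebra above): encryption computes
# c = (p + k) * k = p * k + k ** 2, so decryption recovers the code point as
# p = (c - k ** 2) / k, which is exactly what decrypt() evaluates per position.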
| 162
|
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(F'''{solution() = }''')
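# Editor's worked example (a sketch): 13195 factors as 5 * 7 * 13 * 29, so
# solution(13195) returns 29.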
| 519
| 0
|
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of elements of `array` summing to `target` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoised over the target value."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
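# Editor's worked example (a sketch): for target 5 and array [1, 2, 5] the
# bottom-up table grows as dp = [1, 1, 2, 3, 5, 9], so all three functions
# above return 9.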
| 322
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past, output_from_past, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 322
| 1
|
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCAmelCase_ = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCAmelCase_ = _calculate(days - 1 , _UpperCamelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCAmelCase_ = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCAmelCase_ = _calculate(days - 1 , _UpperCamelCase , 0 )
UpperCAmelCase_ = state_late + state_absent + state_ontime
UpperCAmelCase_ = prizestrings
return prizestrings
def __lowerCamelCase ( _UpperCamelCase : int = 30 ):
'''simple docstring'''
return _calculate(_UpperCamelCase , absent=0 , late=0 )
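# Sanity check (from the problem statement, not the original file): there are
# exactly 43 prize strings over a 4-day period, so
#
#   assert solution(4) == 43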
if __name__ == "__main__":
print(solution())
| 390
|
"""Utility for updating the version number everywhere it appears in the repo
before and after a release."""
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the replace pattern registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned minimum version in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere; example scripts are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the main docs with links to the stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
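# Hedged usage sketch (the file path `utils/release.py` is assumed, not stated
# in this snippet; run from the repo root):
#
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # bump back to a .dev0 version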
| 390
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Map an image size to the corresponding latent size for the given scale factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
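# Illustrative example (not part of the original file): with the default
# scale_factor of 8, a 768x768 image maps to 768 // 64 * 8 = 96, i.e.
# downscale_height_and_width(768, 768) == (96, 96); non-multiples round up.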
def prepare_image(pil_image, w=512, h=512):
    """Resize a PIL image and convert it to a (1, 3, h, w) float tensor in [-1, 1]."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
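# Quick sanity sketch (hypothetical file name): prepare_image(Image.open("frog.png"))
# returns a torch.FloatTensor of shape (1, 3, 512, 512) with values in [-1, 1],
# ready to be batched with torch.cat for the pipeline below.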
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Image-to-image generation pipeline using Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]): MoVQ image encoder and decoder.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # add noise to the clean latents at the chosen timestep to get the starting latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """
        Offloads all models to CPU using accelerate, significantly reducing memory usage. Submodules are moved
        to GPU only when their `forward` method is called.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """
        Offloads all models to CPU using accelerate, keeping one whole model on GPU at a time. Compared with
        `enable_sequential_cpu_offload` this uses more memory but keeps performance much closer to full GPU.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """
        Returns the device on which the pipeline's models will be executed, taking accelerate hooks into account.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 708
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ log noise
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 46
| 0
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the glue/mrpc dataset.

    Args:
        accelerator (`Accelerator`): An `Accelerator` object.
        batch_size (`int`, *optional*): The batch size for the train DataLoader.
        model_name (`str`, *optional*): The name of the model to use.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
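# Hedged usage sketch (names as defined above): each batch yielded by these
# loaders is a dict of padded tensors,
#
#   train_dl, eval_dl = get_dataloaders(Accelerator(), batch_size=16)
#   batch = next(iter(train_dl))  # keys include input_ids, attention_mask, labels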
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer (a DeepSpeed-config-provided optimizer is wrapped in a dummy)
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
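# Hedged launch sketch (the DeepSpeed config file name is assumed, not part of
# this script):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir results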
| 417
|
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    """Replace the matched key with its two one-bit extensions in the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # code length grows by one bit whenever the index crosses a power of two
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress the given string of bits using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    # flush any trailing bits that never grew into a full lexicon entry
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (self-delimiting binary header) to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the given file, padded out to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it, and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
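# Hedged usage sketch (the script file name is assumed):
#
#   python lempel_ziv.py uncompressed.bin compressed.lz
#
# The first CLI argument is the source file, the second the destination.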
| 417
| 1
|
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
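# Quick sanity check (illustrative, not from the original file):
#
#   encrypt("SOS")          # -> "... --- ..."
#   decrypt("... --- ...")  # -> "SOS"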
| 701
|
import re
def dna(dna: str) -> str:
    """
    Return the complementary strand of the given DNA sequence.

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
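# Illustrative error case (complement pairs are A<->T and C<->G): any character
# outside ATCG fails the length check above, so dna("AXTG") raises
# ValueError("Invalid Strand").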
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ConvNext does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 115
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
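# Note on the pattern above (a behavioral sketch, not part of the file):
# replacing sys.modules[__name__] with a _LazyModule means that, e.g.,
#
#   from transformers.models.sew import SEWModel
#
# only imports modeling_sew (and hence torch) on first attribute access,
# keeping a plain `import transformers` fast.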
| 115
| 1
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE ="0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 712
|
"""simple docstring"""
from math import sqrt
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase__( __SCREAMING_SNAKE_CASE : int = 1_00_01 ):
lowercase_ : str = 0
lowercase_ : Optional[Any] = 1
while count != nth and number < 3:
number += 1
if is_prime(__SCREAMING_SNAKE_CASE ):
count += 1
while count != nth:
number += 2
if is_prime(__SCREAMING_SNAKE_CASE ):
count += 1
return number
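# Quick check (illustrative): the first six primes are 2, 3, 5, 7, 11, 13,
# so solution(6) == 13.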
if __name__ == "__main__":
print(F"{solution() = }")
| 477
| 0
|
""" Tokenization class for model T5."""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library), based on Unigram.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences by appending the `eos` token: `X </s>` for a
        single sequence, `A </s> B </s>` for a pair.
        """
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create the (all-zero) token type ids. T5 does not make use of token type ids."""
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
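# Hedged usage sketch (illustrative; "t5-small" appears in the map above):
#
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   tok("translate English to German: hello").input_ids  # ends with tok.eos_token_id
#   tok.get_sentinel_tokens()  # unordered, e.g. contains '<extra_id_0>'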
| 517
|
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
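# Worked example (sketch): climb_stairs(4) == 5, following the Fibonacci-style
# recurrence f(n) = f(n - 1) + f(n - 2) with f(1) = 1 and f(2) = 2.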
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role,
            image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions, py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
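# Sketch of the JSON artifact the test above writes (keys come from the code;
# the numbers here are invented for illustration):
#   {"train_time": 642.3, "eval_accuracy": [0.64], "eval_loss": [0.81]}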
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
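# Shape sketch (assumed, for the tester defaults above): with batch_size=13 and
# seq_length=7, attention_mask comes out as (13, 7) and head_mask as
# (encoder_layers, encoder_attention_heads).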
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for a given split (train/val/test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
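# For example (sketch): handle_metrics("val", {"val_loss": 1.23}, "out") logs the
# metric and writes it to out/val_results.json via save_json.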
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPU)
    main()
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_lowercase = """src/diffusers"""
# Matches is_xxx_available()
_lowercase = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_lowercase = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
_lowercase = """
{0} = None
"""
_lowercase = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
_lowercase = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the main __init__ and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for a dummy object that raises unless `backend_name` is available."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
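# For example (sketch): create_dummy_object("UNet2DModel", '["torch"]') fills in
# DUMMY_CLASS, producing a stand-in class whose methods call requires_backends;
# an all-caps name such as "ONNX_WEIGHTS_NAME" would use DUMMY_CONSTANT instead.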
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files, one per backend combination."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; optionally `overwrite` them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowercase = parser.parse_args()
check_dummies(args.fix_and_overwrite)
'''simple docstring'''
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number (1, 1, 2, 5, 14, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
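# Worked example (sketch): catalan(5) == 14, the fifth entry of the sequence
# starting at C_0 = 1, i.e. 1, 1, 2, 5, 14.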
if __name__ == "__main__":
import doctest
doctest.testmod()
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
from math import pow, sqrt
def validate(*values: float) -> bool:
    """All inputs must be positive for Graham's law formulas to apply."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )
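# Usage sketch (illustrative molar masses): hydrogen vs. oxygen,
#   effusion_ratio(2.016, 32.00)  ->  sqrt(32.00 / 2.016) ≈ 3.98
# i.e. H2 effuses roughly four times faster than O2.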
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compiled)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Synchronize all processes across the distributed setup."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, only on the main process (or via XLA on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (upper-cased keys) inside a `with` block."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    """Return True if something is already listening on `port` (default 29500) on localhost."""
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
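# Minimal usage sketch (illustrative values):
# with patch_environment(master_port="29501"):
#     assert os.environ["MASTER_PORT"] == "29501"
# is_port_in_use(29500)  # True if a local process already bound the port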
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
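# Shape sketch (assumed): betas_for_alpha_bar(1000) returns a (1000,) float32
# tensor; each beta_i is 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.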
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps=1000,
        variance_type="fixed_small_log",
        clip_sample=True,
        clip_sample_range=1.0,
        prediction_type="epsilon",
        beta_schedule="squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the original class name was lost in this dump; TFGPT2Tokenizer is the
# keras_nlp-gated object in transformers, so it is assumed here.
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
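# Example (added for illustration; assumes transformers is installed): the derived
# `hidden_size` is embed_dim * 2 ** (num_stages - 1), e.g. 64 * 2**3 = 512 for the
# default 4-stage configuration.
#     >>> config = NatConfig()
#     >>> config.hidden_size
#     512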
| 36
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            # the while loop never reached a palindrome within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 36
| 1
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find the root of ``func`` (given as a string in the variable ``x``),
    starting from the initial guess ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
# Find value of e (the root of log(x) - 1 = 0)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
| 523
|
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find the root of ``func`` (given as a string in the variable ``x``),
    starting from the initial guess ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
# Find value of e (the root of log(x) - 1 = 0)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
| 70
| 0
|
"""simple docstring"""
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
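# A few illustrative cases (added; not in the original file):
#     >>> is_balanced("([]{})")
#     True
#     >>> is_balanced("([)]")   # interleaving is rejected via open_to_closed[stack.pop()]
#     False
#     >>> is_balanced("(")      # an unclosed bracket leaves a non-empty stack
#     False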
| 702
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
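# Illustration (added): `_LazyModule` defers the heavy framework imports declared in
# `_import_structure` until an attribute is first accessed, so importing a config class
# stays cheap even when torch/tf/flax are installed (sketch, assuming transformers):
#     >>> from transformers.models.blenderbot import BlenderbotConfig  # no torch import yet
#     >>> BlenderbotConfig().model_type
#     'blenderbot'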
| 544
| 0
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
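# Usage sketch (added; all paths below are placeholders):
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path /path/to/fairseq_checkpoint.pt \
#         --dict_path /path/to/dict.txt \
#         --pytorch_dump_folder_path ./converted
#
# The dumped folder can then be reloaded with
# `SpeechEncoderDecoderModel.from_pretrained("./converted")`.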
| 556
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
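# Usage sketch (added; the model id and placeholder string are illustrative):
#     >>> tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#     >>> tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     >>> # "<cat-toy>" now expands to "<cat-toy>_0 ... <cat-toy>_3" before tokenization
#     >>> input_ids = tokenizer.encode("a photo of <cat-toy>")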
| 619
| 0
|
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
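# Illustrative cases (added):
#     >>> join("", ["a", "b", "c", "d"])
#     'abcd'
#     >>> join("#", ["a", "b", "c", "d"])
#     'a#b#c#d'
#     >>> join("-", ["apple", "banana"])
#     'apple-banana'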
| 715
|
from __future__ import annotations
__lowerCamelCase : Optional[int] = """Muhammad Umer Farooq"""
__lowerCamelCase : Tuple = """MIT"""
__lowerCamelCase : Optional[int] = """1.0.0"""
__lowerCamelCase : int = """Muhammad Umer Farooq"""
__lowerCamelCase : Optional[int] = """contact@muhammadumerfarooq.me"""
__lowerCamelCase : Dict = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 38
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
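# Usage sketch (added; output shape follows from the floor-to-multiple resize):
#     >>> from PIL import Image
#     >>> processor = GLPNImageProcessor(size_divisor=32)
#     >>> inputs = processor(Image.new("RGB", (65, 33)), return_tensors="pt")
#     >>> inputs["pixel_values"].shape   # 33 -> 32, 65 -> 64
#     torch.Size([1, 3, 32, 64])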
| 94
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 560
| 0
|
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 460
|
'''simple docstring'''
def heaps(arr: list) -> list:
    """Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
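# Note (added): Heap's algorithm emits each of the n! permutations using a single
# adjacent-element swap between consecutive outputs.
#     >>> heaps([1, 2, 3])
#     [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]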
| 460
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 414
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333
| 0
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 703
|
"""simple docstring"""
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
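# Note (added): with integer keys there is one bucket per distinct value, so the final
# per-bucket sort is trivial and the runtime is O(n + k) for k = max - min + 1.
#     >>> bucket_sort([0.4, 1.2, 0.1])   # floats land in int(value - min) buckets
#     [0.1, 0.4, 1.2]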
| 14
| 0
|
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
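# Illustration (added): the per-residue lookup above is a plain gather. Toy example
# with 2 residue types and 3 atom slots:
#     >>> import torch
#     >>> table = torch.tensor([[0, 1, 2], [2, 1, 0]])  # per-restype atom mapping
#     >>> aatype = torch.tensor([1, 0, 1])              # residue types of a 3-residue chain
#     >>> table[aatype]                                  # shape (num_res, 3)
#     tensor([[2, 1, 0],
#             [0, 1, 2],
#             [2, 1, 0]])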
| 585
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."""
    )
    parser.add_argument(
        """--dataset_name""",
        type=str,
        default="""wikitext""",
        help="""Name of the training. Explore datasets at: hf.co/datasets.""",
    )
    parser.add_argument(
        """--dataset_config""", type=str, default="""wikitext-103-raw-v1""", help="""Configuration name of the dataset."""
    )
    parser.add_argument(
        """--tokenizer_name_or_path""",
        type=str,
        default="""sayakpaul/unigram-tokenizer-wikitext""",
        help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""",
    )
    parser.add_argument(
        """--shard_size""",
        type=int,
        default=1000,
        help="""Number of entries to go in a single shard.""",
    )
    parser.add_argument("""--split""", type=str, default="""train""", choices=["""train""", """test""", """validation"""])
    parser.add_argument(
        """--limit""",
        default=None,
        type=int,
        help="""Limit the number of shards (used for debugging).""",
    )
    parser.add_argument(
        """--max_length""",
        type=int,
        default=512,
        help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""",
    )
    parser.add_argument(
        """--output_dir""",
        default="""tf-tpu""",
        type=str,
        help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""",
    )
    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["""text"""])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["""input_ids"""])):
        features = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i])),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"""Limiting the dataset to {args.limit} entries.""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["""text"""])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""])
        filename = os.path.join(split_dir, f"""dataset-{shard_count}-{records_containing}.tfrecord""")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("""Wrote file {} containing {} records""".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"""split-{args.split}-records-count.txt""", """w""") as f:
        print(f"""Total {args.split} records: {total_records}""", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
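# Reading a shard back (added sketch; the feature spec mirrors what was serialized
# above, and the filename is a placeholder):
#
#     import tensorflow as tf
#
#     feature_spec = {
#         "input_ids": tf.io.VarLenFeature(tf.int64),
#         "attention_mask": tf.io.VarLenFeature(tf.int64),
#     }
#     ds = tf.data.TFRecordDataset(["dataset-0-1000.tfrecord"])
#     ds = ds.map(lambda record: tf.io.parse_single_example(record, feature_spec))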
| 585
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 235
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""")}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})}),
            # No default supervised_keys.
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""examples""": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", f'''{builder.name}-train.arrow''')
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json"""))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            with patch("""apache_beam.io.parquetio.WriteToParquet""") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", f'''{builder.name}-train-00000-of-00002.arrow'''
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, """default""", """0.0.0""", f'''{builder.name}-train-00001-of-00002.arrow'''
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"""content""": datasets.Value("""string""")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""]), sorted(["""foo""", """bar""", """foobar"""]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", """dataset_info.json"""))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="""DirectRunner""")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, """default""", """0.0.0""", f'''{builder.name}-train.arrow''')
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows, expected_num_examples)
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples, expected_num_examples)
            self.assertDictEqual(dset["""train"""][0], get_test_nested_examples()[0][1])
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(a_ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
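# --- Added usage sketch (not part of the original test module; assumes apache-beam
# is installed). The builders above rely on Beam's in-process DirectRunner; the same
# "Load Examples" pattern in a standalone pipeline looks like this:
#
#     import apache_beam as beam
#
#     with beam.Pipeline(runner="DirectRunner") as pipeline:
#         _ = pipeline | "Load Examples" >> beam.Create([(0, {"content": "foo"})])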
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
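# --- Added demo (hedged): the functions above assume a singly linked node type with
# `.val` and `.next`; a minimal stand-in and a round-trip check are sketched below.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


def _build(values):
    # build a linked list from a Python list, back to front
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head


if __name__ == "__main__":
    for values, expected in (([1, 2, 2, 1], True), ([1, 2], False), ([], True)):
        assert is_palindrome(_build(values)) is expected
        assert is_palindrome_stack(_build(values)) is expected
        assert is_palindrome_dict(_build(values)) is expected
    print("all palindrome checks passed")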
def method_2(boundary: list, steps: float) -> float:
    """Extended trapezoidal rule for f over [boundary[0], boundary[1]]."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
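# --- Added check (hedged): for f(x) = x**2 on [0, 1] the exact integral is 1/3, so
# the trapezoidal estimate should converge toward 0.333... as `steps` grows.
if __name__ == "__main__":
    for steps in (10.0, 100.0, 1000.0):
        print(f"steps={steps:g}  estimate={method_2([0.0, 1.0], steps):.6f}")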
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
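# --- Added note (hedged, not part of the original file) ---
# The pattern above registers submodule contents without importing them; the heavy
# torch/tf/flax imports only happen when an attribute is first accessed, e.g.:
#
#     from transformers import DistilBertModel   # triggers the modeling_distilbert import
#
# `_LazyModule` achieves this with a PEP 562 style module `__getattr__`; a minimal
# illustrative stand-in (hypothetical, not the transformers implementation) would be:
#
#     def __getattr__(name):
#         for module_name, symbols in _import_structure.items():
#             if name in symbols:
#                 import importlib
#                 return getattr(importlib.import_module("." + module_name, __name__), name)
#         raise AttributeError(name)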
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
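# --- Added check (hedged): for a Hermitian matrix the Rayleigh quotient is real and
# lies between the smallest and largest eigenvalues; a quick numerical sanity test:
if __name__ == "__main__":
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    eigs = np.linalg.eigvalsh(a)
    r = rayleigh_quotient(a, np.array([[1], [2], [3]])).item()
    assert eigs.min() - 1e-9 <= r.real <= eigs.max() + 1e-9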
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of the same dimension and returns their L1 distance."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Like manhattan_distance, written as a single comprehension."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
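# --- Added demo (hedged): both variants agree; the distance is the L1 norm of the
# coordinate-wise differences.
if __name__ == "__main__":
    p, q = [1, 1], [2, 2]
    assert manhattan_distance(p, q) == manhattan_distance_one_liner(p, q) == 2.0
    assert manhattan_distance([1.5, 2], [3, 0]) == 3.5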
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Computes (a**n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
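# --- Added note (hedged): the checks above rely on Fermat's little theorem: for a
# prime p and b not divisible by p, b**(p-2) % p is the modular inverse of b, so
# division mod p can be replaced by a multiplication:
if __name__ == "__main__":
    inverse = binary_exponentiation(b, p - 2, p)
    assert (b * inverse) % p == 1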
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Returns the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Returns all primes up to and including num via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
"""
Project Euler Problem 551: https://projecteuler.net/problem=551

Sum of digits sequence: starting with a_0 = 1, each term is the previous term plus
the sum of its own digits. Find a(10**15).
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest term
    for which c > 10**k, returning the difference and the number of terms jumped.
    """
    # a_i -> b * 10^k + c; ds_b -> digitsum(b); c -> the lowest k digits of a_i
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Naively computes terms of the sequence until i reaches n or a digit carries past k."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to digits starting at index k, propagating the carry."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns a(n), the n-th term of the sum-of-digits sequence."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f'{solution() = }')
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs an M-CTC-T feature extractor that extracts log-mel filterbank features from raw speech.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extracts MFSC features for one waveform at a time."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
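# --- Added usage sketch (hedged; this module uses relative imports, so it runs only
# as part of the installed package). Feature extraction for one second of 16 kHz audio:
#
#     import numpy as np
#     extractor = MCTCTFeatureExtractor()
#     batch = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000)
#     print(batch["input_features"][0].shape)   # (frames, 80) log-mel filterbank frames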
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Dict = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
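# --- Added usage sketch (hedged; assumes the transformers package is installed):
#
#     from transformers import XLMRobertaConfig, XLMRobertaModel
#
#     config = XLMRobertaConfig(num_hidden_layers=6)   # override any default above
#     model = XLMRobertaModel(config)                  # randomly initialised weights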
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
"""
Given a matrix in which each row and each column is sorted in decreasing order,
count the number of negative numbers.
https://leetcode.com/problems/count-negative-numbers-in-a-sorted-matrix
"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Returns a list of all prime numbers up to and including num."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
import comet # From: unbabel-comet
import torch
import datasets
__UpperCAmelCase : List[Any] = datasets.logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
__UpperCAmelCase : Union[str, Any] = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
__UpperCAmelCase : List[str] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Comet(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
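# --- Added usage sketch (hedged; assumes the transformers package is installed):
#
#     from transformers import ViTMAEConfig, ViTMAEForPreTraining
#
#     config = ViTMAEConfig(mask_ratio=0.5)   # mask half of the image patches
#     model = ViTMAEForPreTraining(config)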
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark.run()
if __name__ == "__main__":
main()
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[str] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps: int, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
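# --- Added sanity sketch (hedged; the relative imports above mean this file only runs
# as part of the package). The cosine schedule yields one beta per diffusion step,
# each clipped to max_beta:
#
#     betas = betas_for_alpha_bar(1000)
#     assert betas.shape == (1000,)
#     assert float(betas.max()) <= 0.999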
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1_024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
logger.info('*** Predict ***' )
__lowerCamelCase : Union[str, Any] = trainer.predict(test_dataset=_lowerCAmelCase , metric_key_prefix='test' )
__lowerCamelCase : List[Any] = test_output.metrics
__lowerCamelCase : List[str] = data_args.n_test
if trainer.is_world_process_zero():
__lowerCamelCase : Dict = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _lowerCAmelCase , training_args.output_dir )
all_metrics.update(_lowerCAmelCase )
if training_args.predict_with_generate:
__lowerCamelCase : str = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
__lowerCamelCase : Any = lmap(str.strip , _lowerCAmelCase )
write_txt_file(_lowerCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_lowerCAmelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
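# The script above leans on a few small helpers imported from the example's `utils`
# module. A minimal sketch of plausible implementations follows; this is an
# assumption for illustration, not necessarily the module's exact code.
import json


def save_json(content, path, indent=4, **json_dump_kwargs):
    # Serialize metrics dicts such as {split}_results.json / all_results.json.
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, sort_keys=True, **json_dump_kwargs)


def lmap(f, x):
    # list(map(...)) shorthand used when stripping generated predictions.
    return list(map(f, x))


def write_txt_file(ordered_tgt, path):
    # One generated prediction per line, e.g. for test_generations.txt.
    with open(path, "w") as f:
        for line in ordered_tgt:
            f.write(line + "\n")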
| 652
|
MOD_ADLER = 65521


def adler_32(plain_text: str) -> int:
    """
    Function implements adler-32 hash.
    Iterates and evaluates a new value for each character.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
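# Sanity check -- a minimal sketch assuming only the standard library: the function
# above should agree with zlib.adler32, which implements the same checksum.
import zlib

sample = "Wikipedia"
assert adler_32(sample) == zlib.adler32(sample.encode("utf-8"))
print(hex(adler_32(sample)))  # 0x11e60398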
| 629
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
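# The _LazyModule above defers heavy imports until first attribute access. A minimal
# sketch of the same idea with PEP 562 module-level __getattr__ instead of the
# internal helper; the attribute-to-submodule mapping here is illustrative.
import importlib

_LAZY_ATTRS = {"UniSpeechConfig": ".configuration_unispeech"}


def __getattr__(name):
    # Import the owning submodule only when the attribute is first requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")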
| 714
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase = self.get_input_output_texts(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def __snake_case ( self ):
try:
lowerCAmelCase = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def __snake_case ( self ):
lowerCAmelCase = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCAmelCase_ )
lowerCAmelCase = '''こんにちは、世界。\nこんばんは、世界。'''
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCAmelCase = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase_ , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , '''rb''' ) as handle:
lowerCAmelCase = pickle.load(UpperCAmelCase_ )
lowerCAmelCase = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def __snake_case ( self ):
lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowerCAmelCase = tokenizer.subword_tokenizer
lowerCAmelCase = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowerCAmelCase = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCAmelCase_ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def __snake_case ( self ):
super().setUp()
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __snake_case ( self , **UpperCAmelCase_ ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase_ )
def __snake_case ( self , UpperCAmelCase_ ):
lowerCAmelCase = '''こんにちは、世界。 \nこんばんは、世界。'''
lowerCAmelCase = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowerCAmelCase = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCAmelCase_ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __snake_case ( self ):
lowerCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase_ ):
lowerCAmelCase = i
lowerCAmelCase = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def __snake_case ( self ):
lowerCAmelCase = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowerCAmelCase = tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class BertTokenizerMismatchTest(unittest.TestCase):
def __snake_case ( self ):
lowerCAmelCase = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowerCAmelCase = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
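# For orientation, a minimal usage sketch of the tokenizer exercised above; it assumes
# network access plus the MeCab dependencies (fugashi, ipadic) are installed.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))  # MeCab word split followed by WordPiece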
| 33
| 0
|
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
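# A minimal sketch of a concrete FilterType to drive the plots above: a single-pole
# low-pass IIR smoother. The coefficient value is illustrative.
class OnePoleLowpass:
    """y[n] = a * x[n] + (1 - a) * y[n - 1]"""

    def __init__(self, a: float = 0.1) -> None:
        self.a = a
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.a * sample + (1 - self.a) * self.prev
        return self.prev


# Usage (opens a matplotlib window):
# show_frequency_response(OnePoleLowpass(), samplerate=48000)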
| 229
|
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 229
| 1
|
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where `function` becomes 0 in [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
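# Since each step halves the bracket, the number of iterations needed to reach the
# 10**-7 stopping threshold on an interval of width w is about ceil(log2(w / 1e-7)).
from math import ceil, log2

print(ceil(log2((1000 - 1) / 1e-7)))  # ~34 halvings suffice for the [1, 1000] example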
| 252
|
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
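# A simplified sketch of how DummyObject-style placeholders behave: touching the
# class without its backends installed raises immediately. This stand-in is for
# illustration only, not the library's actual implementation.
class _StrictDummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires backends: {cls._backends}")


class FakeScheduler(metaclass=_StrictDummyMeta):
    _backends = ["torch", "scipy"]


# FakeScheduler.from_pretrained  -> ImportError listing the missing backends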
| 252
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def __UpperCAmelCase ( self :Dict ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __UpperCAmelCase ( self :Dict ) -> int:
'''simple docstring'''
_a : List[Any] =pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
_a : Optional[Any] =unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8_0_1_5, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5_5_0_6, """token_str""": """ accuser"""},
] , )
_a : Tuple =unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8_0_1_5,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5_5_0_6,
"""token_str""": """ accuser""",
},
] , )
_a : int =unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2_9_4_1, """token_str""": """ Te"""},
] , )
@require_torch
def __UpperCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
_a : int =pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
_a : Dict =unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5_6_7_6, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""},
] , )
_a : Tuple =unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""},
] , )
_a : List[Any] =unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2_9_4_1, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
] , )
_a : int =unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5_6_7_6,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def __UpperCAmelCase ( self :List[Any] ) -> Tuple:
'''simple docstring'''
_a : Tuple =pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
_a : List[Any] =pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
@require_torch
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
_a : Any =pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(_SCREAMING_SNAKE_CASE )
@slow
@require_tf
def __UpperCAmelCase ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
_a : Optional[int] =pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(_SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Union[str, Any] , SCREAMING_SNAKE_CASE :str ) -> Any:
'''simple docstring'''
_a : Dict =unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 6_1_0, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1_5_7_3, """token_str""": """ Chris"""},
] , )
_a : Dict =unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2_2_0_1,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2_7_9_0,
"""token_str""": """ Lyon""",
},
] , )
_a : Optional[int] =unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3_4_9_9, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3_6_0_6, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2_9_4_1, """token_str""": """ Te"""},
] , )
@require_torch
def __UpperCAmelCase ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
_a : Any =pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
_a : List[Any] =None
_a : Union[str, Any] =None
self.run_pipeline_test(_SCREAMING_SNAKE_CASE , [] )
@require_tf
def __UpperCAmelCase ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
_a : Union[str, Any] =pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
_a : Optional[Any] =None
_a : str =None
self.run_pipeline_test(_SCREAMING_SNAKE_CASE , [] )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :int ) -> Optional[Any]:
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
_a : Tuple =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_a : Dict =[
f"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :List[Any] , SCREAMING_SNAKE_CASE :Dict ) -> Dict:
'''simple docstring'''
_a : int =fill_masker.tokenizer
_a : Optional[Any] =fill_masker.model
_a : List[str] =fill_masker(
f"This is a {tokenizer.mask_token}" , )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
_a : List[str] =fill_masker([f"This is a {tokenizer.mask_token}"] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
_a : Dict =fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
],
[
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
fill_masker("""This is""" )
self.run_test_top_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.run_test_targets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.run_test_top_k_targets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.fill_mask_with_duplicate_targets_and_top_k(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.fill_mask_with_multiple_masks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_a : Any =tokenizer.get_vocab()
_a : int =sorted(vocab.keys() )[:2]
# Pipeline argument
_a : int =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , targets=_SCREAMING_SNAKE_CASE )
_a : Dict =fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
_a : int ={vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _SCREAMING_SNAKE_CASE )
_a : Optional[int] =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_SCREAMING_SNAKE_CASE ) )
# Call argument
_a : List[str] =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_a : Any =fill_masker(f"This is a {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
_a : Optional[Any] ={vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , _SCREAMING_SNAKE_CASE )
_a : List[Any] =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(_SCREAMING_SNAKE_CASE ) )
# Score equivalence
_a : Optional[Any] =fill_masker(f"This is a {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE )
_a : str =[top_mask["""token_str"""] for top_mask in outputs]
_a : str =[top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_SCREAMING_SNAKE_CASE ) == set(_SCREAMING_SNAKE_CASE ):
_a : List[Any] =fill_masker(f"This is a {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE )
_a : List[Any] =[top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
# Raises with invalid
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_a : Optional[int] =fill_masker(f"This is a {tokenizer.mask_token}" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_a : Any =fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""""""] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_a : List[Any] =fill_masker(f"This is a {tokenizer.mask_token}" , targets="""""" )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :List[str] ) -> List[Any]:
'''simple docstring'''
_a : List[str] =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , top_k=2 )
_a : str =fill_masker(f"This is a {tokenizer.mask_token}" )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
_a : Tuple =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_a : str =fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
] , )
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
def __UpperCAmelCase ( self :Tuple , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Any ) -> Dict:
'''simple docstring'''
_a : Optional[int] =tokenizer.get_vocab()
_a : str =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
# top_k=2, ntargets=3
_a : str =sorted(vocab.keys() )[:3]
_a : Dict =fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=_SCREAMING_SNAKE_CASE )
# If we use the most probably targets, and filter differently, we should still
# have the same results
_a : Tuple =[el["""token_str"""] for el in sorted(_SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=_SCREAMING_SNAKE_CASE )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_SCREAMING_SNAKE_CASE ).issubset(_SCREAMING_SNAKE_CASE ):
_a : List[Any] =fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=_SCREAMING_SNAKE_CASE )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE ) , nested_simplify(_SCREAMING_SNAKE_CASE ) )
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Dict ) -> str:
'''simple docstring'''
_a : Tuple =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_a : int =tokenizer.get_vocab()
# String duplicates + id duplicates
_a : str =sorted(vocab.keys() )[:3]
_a : Union[str, Any] =[targets[0], targets[1], targets[0], targets[2], targets[1]]
_a : List[Any] =fill_masker(f"My name is {tokenizer.mask_token}" , targets=_SCREAMING_SNAKE_CASE , top_k=1_0 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 3 )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
_a : str =FillMaskPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
_a : List[str] =fill_masker(
f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
_SCREAMING_SNAKE_CASE , [
[
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
],
[
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
],
[
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
{"""sequence""": ANY(_SCREAMING_SNAKE_CASE ), """score""": ANY(_SCREAMING_SNAKE_CASE ), """token""": ANY(_SCREAMING_SNAKE_CASE ), """token_str""": ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
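# For orientation, a minimal sketch of the pipeline these tests exercise, using the
# same public checkpoint the slow tests load.
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2)
for candidate in unmasker("The largest city in France is <mask>"):
    print(candidate["token_str"], round(candidate["score"], 3))  # e.g. " Paris" 0.251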
| 694
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\n\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
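# Worked example, assuming the functions above: for the infix expression
# a+b*(c^d-e) the conversion yields postfix abcd^e-*+ and prefix +a*b-^cde.
expression = "a+b*(c^d-e)"
print(infix_2_postfix(expression))  # abcd^e-*+ (alongside the printed trace table)
print(infix_2_prefix(expression))   # +a*b-^cde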
| 280
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
@require_torch
def __lowerCamelCase ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ : Optional[int] ='\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ : List[str] ='\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ : Optional[int] ='\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ : str ='hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='fill-mask' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ : Any =[sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ : Any =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ : int ='1'
SCREAMING_SNAKE_CASE_ : Optional[int] =subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __lowerCamelCase ( self ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ : Tuple ='\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ : int ='\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ : int ='\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ : str ='hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(__UpperCAmelCase )
BertModel.from_pretrained(__UpperCAmelCase )
BertTokenizer.from_pretrained(__UpperCAmelCase )
pipeline(task='fill-mask' , model=__UpperCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ : Any =[sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ : str =self.get_env()
SCREAMING_SNAKE_CASE_ : Tuple =subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def __lowerCamelCase ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ : Optional[Any] ='\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ : Tuple ='\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ : List[str] ='\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ : int =[sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ : List[str] =self.get_env()
SCREAMING_SNAKE_CASE_ : List[Any] =subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ : Optional[int] =[sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ : Dict ='1'
SCREAMING_SNAKE_CASE_ : Dict =subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n'

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n'
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
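# The tests above share one pattern: run a child Python process whose socket.socket is
# monkey-patched to raise, so any network access fails loudly while cached files keep
# working. A minimal standalone sketch of that pattern (illustrative only, not part of
# the test suite above):
#
# import os
# import subprocess
# import sys
#
# load = "from transformers import BertConfig"
# mock = (
#     "import socket\n"
#     'def offline_socket(*args, **kwargs): raise socket.error("no network")\n'
#     "socket.socket = offline_socket"
# )
# run = 'BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert"); print("success")'
# result = subprocess.run(
#     [sys.executable, "-c", "\n".join([load, mock, run])],
#     env={**os.environ, "TRANSFORMERS_OFFLINE": "1"},
#     capture_output=True,
# )
# assert b"success" in result.stdout  # works only if the files are already cached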
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, using either a multiprocessing.Pool
    or the currently configured joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Configure the parallel backend (e.g. joblib-spark) used for parallelized `datasets` operations."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
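# A minimal usage sketch for `parallel_backend` above (illustrative; assumes a Spark
# cluster plus the `joblibspark` package, and that `Dataset.map` routes its
# multiprocessing through `parallel_map` as defined in this module):
#
# from datasets import load_dataset
#
# with parallel_backend("spark"):
#     ds = load_dataset("imdb", split="train")
#     ds = ds.map(lambda x: x, num_proc=2)  # work is dispatched via joblib's spark backend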
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the test cases."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
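# For reference, the pattern these tests exercise in a real training loop is roughly
# the following (illustrative sketch; `model`, `optimizer` and `dataloader` are
# placeholders you would create yourself):
#
# accelerator = Accelerator(even_batches=False)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#
# with accelerator.join_uneven_inputs([model]):
#     for batch in dataloader:  # processes may see different numbers of batches
#         optimizer.zero_grad()
#         loss = model(batch).sum()
#         accelerator.backward(loss)
#         optimizer.step()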
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
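# To see what the qkv branch above is doing, here is a tiny self-contained sketch of
# splitting a fused qkv projection into separate q/k/v weights (illustrative shapes
# only, not tied to any particular checkpoint):
#
# import torch
#
# dim = 4
# fused = torch.randn(3 * dim, dim)   # rows stacked as [query; key; value]
# query = fused[:dim, :]
# key = fused[dim : dim * 2, :]
# value = fused[-dim:, :]
# assert torch.equal(torch.cat([query, key, value], dim=0), fused)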
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_mobilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
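# The loss -> perplexity relationship used in evaluate() above, as a minimal
# standalone example (perplexity is just exp of the mean cross-entropy loss):
#
# import torch
#
# mean_loss = torch.tensor(2.0)      # average negative log-likelihood per token
# perplexity = torch.exp(mean_loss)  # ~7.39: as uncertain as a uniform 7.39-way choice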
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map label strings (e.g. from ImageNet) to their class ids."""
        if not isinstance(label, list):
            label = list(label)

        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )

        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
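# Minimal usage sketch for the pipeline above (illustrative; assumes the pretrained
# "facebook/DiT-XL-2-256" checkpoint, the one this pipeline is usually demonstrated with):
#
# import torch
# from diffusers import DiTPipeline
#
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
# pipe = pipe.to("cuda")
# class_ids = pipe.get_label_ids(["white shark", "umbrella"])
# images = pipe(class_labels=class_ids, num_inference_steps=25).images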
import os
def solution() -> int:
    """Find the maximum total along a top-to-bottom path in the triangle, by folding
    each row into the next one (Project Euler problems 18/67 style dynamic programming)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
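# A tiny worked example of the recurrence above (not from the original file): for the triangle
#     3
#    7 4
#   2 4 6
# row 1 becomes [3+7, 3+4] = [10, 7]; row 2 becomes [2+10, 4+max(10, 7), 6+7] = [12, 14, 13],
# so the best path total is max([12, 14, 13]) = 14, i.e. the path 3 -> 7 -> 4.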
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
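# The slow tests above exercise the real checkpoint; the end-user call they
# correspond to looks roughly like this (illustrative sketch only):
#
# import torch
# from diffusers import StableDiffusionInstructPix2PixPipeline
# from diffusers.utils import load_image
#
# pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#     "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
# ).to("cuda")
# image = load_image(
#     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
# )
# edited = pipe("turn him into a cyborg", image=image, num_inference_steps=20).images[0]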
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
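# The fused qkv weight has shape (3 * hidden_size, hidden_size) and is split into equal
# thirds: rows [0, H) hold the query projection, [H, 2H) the keys, and [2H, 3H) the values,
# where H is encoder_config.hidden_size.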
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
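# Illustrative behaviour: rename_key is meant to pop the old key and re-insert its value
# under the new name, e.g. {"encoder.deit.norm.weight": w} becomes {"encoder.layernorm.weight": w}.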
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 654
| 0
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCAmelCase__( __UpperCAmelCase : Any ):
__snake_case : Tuple = int(number**0.5 )
return number == sq * sq
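# e.g. is_sq(36) is True (36 == 6 * 6) while is_sq(35) is False.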
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict ):
__snake_case : str = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__snake_case : str = x_den * y_den * z_den
__snake_case : Optional[Any] = gcd(_A , _A )
top //= hcf
bottom //= hcf
return top, bottom
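# Worked example (assuming the mangled parameters stand for x_num, x_den, y_num, y_den,
# z_num, z_den as used in the body): add_three(1, 2, 1, 3, 1, 6) computes
# 1/2 + 1/3 + 1/6 = 36/36, divides by hcf = 36 and returns (1, 1).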
def UpperCAmelCase__( __UpperCAmelCase : List[str] = 35 ):
__snake_case : Dict = set()
__snake_case : int = 42
__snake_case : Optional[Any] = Fraction(0 )
__snake_case : Dict = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
__snake_case : Union[str, Any] = x_num * y_den + x_den * y_num
__snake_case : Union[str, Any] = x_den * y_den
__snake_case : str = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__snake_case : Tuple = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
# n=2
__snake_case : Dict = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__snake_case : Tuple = x_den * x_den * y_den * y_den
if is_sq(_A ) and is_sq(_A ):
__snake_case : Any = int(sqrt(_A ) )
__snake_case : int = int(sqrt(_A ) )
__snake_case : List[Any] = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__snake_case : Union[str, Any] = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
# n=-1
__snake_case : Tuple = x_num * y_num
__snake_case : Any = x_den * y_num + x_num * y_den
__snake_case : str = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__snake_case : List[str] = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
                    # n=-2
__snake_case : str = x_num * x_num * y_num * y_num
__snake_case : List[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_A ) and is_sq(_A ):
__snake_case : int = int(sqrt(_A ) )
__snake_case : int = int(sqrt(_A ) )
__snake_case : int = gcd(_A , _A )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__snake_case : Optional[int] = add_three(
_A , _A , _A , _A , _A , _A )
unique_s.add(_A )
for num, den in unique_s:
total += Fraction(_A , _A )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'''{solution() = }''')
| 576
|
# using dfs for finding eulerian path traversal
def __lowerCAmelCase ( _A ,_A ,_A ,_A=None ):
"""simple docstring"""
_lowercase = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
_lowercase , _lowercase = True, True
_lowercase = dfs(_A ,_A ,_A ,_A )
return path
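# check_circuit_or_path classifies the graph by its number of odd-degree vertices:
#   1 -> Eulerian circuit exists (no odd-degree vertex)
#   2 -> Eulerian path exists (exactly two odd-degree vertices; one is returned as the start)
#   3 -> neither, so no Eulerian traversal is possible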
def __lowerCAmelCase ( _A ,_A ):
"""simple docstring"""
_lowercase = 0
_lowercase = -1
for i in range(_A ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
_lowercase = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __lowerCAmelCase ( _A ,_A ):
"""simple docstring"""
_lowercase = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
_lowercase , _lowercase = check_circuit_or_path(_A ,_A )
if check == 3:
print("""graph is not Eulerian""" )
print("""no path""" )
return
_lowercase = 1
if check == 2:
_lowercase = odd_node
print("""graph has a Euler path""" )
if check == 1:
print("""graph has a Euler cycle""" )
_lowercase = dfs(_A ,_A ,_A )
print(_A )
def __lowerCAmelCase ( ):
"""simple docstring"""
_lowercase = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_lowercase = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
_lowercase = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
_lowercase = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
_lowercase = {
1: [],
2: []
# all degree is zero
}
_lowercase = 10
check_euler(_A ,_A )
check_euler(_A ,_A )
check_euler(_A ,_A )
check_euler(_A ,_A )
check_euler(_A ,_A )
if __name__ == "__main__":
main()
| 398
| 0
|
def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
def merge(lowerCAmelCase : list , lowerCAmelCase : list ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(lowerCAmelCase ) <= 1:
return collection
SCREAMING_SNAKE_CASE_ : Any = len(lowerCAmelCase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
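# Sketch: merge_sort([5, 2, 4, 1]) returns [1, 2, 4, 5]; _merge() repeatedly pops the
# smaller head of the two sorted halves, then yields whatever remains of either half.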
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : str = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : Any = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 316
|
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger('''transformers.models.encodec''')
__lowerCamelCase : int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
__lowerCamelCase : Any = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
__lowerCamelCase : Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
__lowerCamelCase : Union[str, Any] = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
__lowerCamelCase : Union[str, Any] = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
__lowerCamelCase : Tuple = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__lowerCamelCase : Optional[int] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__lowerCamelCase : Dict = []
__lowerCamelCase : Any = []
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE_ : Dict = getattr(lowerCAmelCase , lowerCAmelCase )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = getattr(lowerCAmelCase , lowerCAmelCase ).shape
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ : List[Any] = value
elif weight_type == "running_mean":
SCREAMING_SNAKE_CASE_ : Dict = value
elif weight_type == "running_var":
SCREAMING_SNAKE_CASE_ : Dict = value
elif weight_type == "num_batches_tracked":
SCREAMING_SNAKE_CASE_ : List[Any] = value
elif weight_type == "weight_ih_l0":
SCREAMING_SNAKE_CASE_ : Tuple = value
elif weight_type == "weight_hh_l0":
SCREAMING_SNAKE_CASE_ : Optional[int] = value
elif weight_type == "bias_ih_l0":
SCREAMING_SNAKE_CASE_ : Any = value
elif weight_type == "bias_hh_l0":
SCREAMING_SNAKE_CASE_ : Dict = value
elif weight_type == "weight_ih_l1":
SCREAMING_SNAKE_CASE_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
SCREAMING_SNAKE_CASE_ : Tuple = value
elif weight_type == "bias_ih_l1":
SCREAMING_SNAKE_CASE_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
SCREAMING_SNAKE_CASE_ : Optional[Any] = value
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
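# Illustrative matches (with hypothetical ignore patterns):
# should_ignore("decoder.version", ["decoder.*"]) is True via the trailing-wildcard branch,
# and should_ignore("quantizer.vq.layers.0._codebook.embed", ["quantizer.*._codebook"])
# is True via the prefix/suffix split on ".*.".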
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = []
if model_name == "encodec_24khz" or "encodec_32khz":
SCREAMING_SNAKE_CASE_ : Dict = MAPPING_24K
elif model_name == "encodec_48khz":
SCREAMING_SNAKE_CASE_ : List[Any] = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCAmelCase , lowerCAmelCase ):
logger.info(f'{name} was ignored' )
continue
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
for key, mapped_key in MAPPING.items():
if "*" in key:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = key.split(".*." )
if prefix in name and suffix in name:
SCREAMING_SNAKE_CASE_ : Dict = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = name.split(lowerCAmelCase )[0].split("." )[-2]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mapped_key.replace("*" , lowerCAmelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ : int = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ : int = "weight_v"
elif "weight_ih_l0" in name:
SCREAMING_SNAKE_CASE_ : str = "weight_ih_l0"
elif "weight_hh_l0" in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "weight_hh_l0"
elif "bias_ih_l0" in name:
SCREAMING_SNAKE_CASE_ : Any = "bias_ih_l0"
elif "bias_hh_l0" in name:
SCREAMING_SNAKE_CASE_ : List[str] = "bias_hh_l0"
elif "weight_ih_l1" in name:
SCREAMING_SNAKE_CASE_ : List[str] = "weight_ih_l1"
elif "weight_hh_l1" in name:
SCREAMING_SNAKE_CASE_ : List[str] = "weight_hh_l1"
elif "bias_ih_l1" in name:
SCREAMING_SNAKE_CASE_ : List[Any] = "bias_ih_l1"
elif "bias_hh_l1" in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "bias_hh_l1"
elif "bias" in name:
SCREAMING_SNAKE_CASE_ : int = "bias"
elif "weight" in name:
SCREAMING_SNAKE_CASE_ : List[str] = "weight"
elif "running_mean" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = "running_mean"
elif "running_var" in name:
SCREAMING_SNAKE_CASE_ : List[Any] = "running_var"
elif "num_batches_tracked" in name:
SCREAMING_SNAKE_CASE_ : Any = "num_batches_tracked"
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
set_recursively(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[Any]=None , ):
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE_ : int = EncodecConfig.from_pretrained(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
SCREAMING_SNAKE_CASE_ : Any = [8, 5, 4, 4]
SCREAMING_SNAKE_CASE_ : List[Any] = [2.2]
SCREAMING_SNAKE_CASE_ : Optional[Any] = 6_4
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 3_2_0_0_0
SCREAMING_SNAKE_CASE_ : List[str] = 2_0_4_8
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : int = False
elif model_name == "encodec_48khz":
SCREAMING_SNAKE_CASE_ : Optional[int] = [8, 5, 4, 2]
SCREAMING_SNAKE_CASE_ : int = [3.0, 6.0, 12.0, 24.0]
SCREAMING_SNAKE_CASE_ : Any = 4_8_0_0_0
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = "time_group_norm"
SCREAMING_SNAKE_CASE_ : Any = True
SCREAMING_SNAKE_CASE_ : Optional[int] = 1.0
SCREAMING_SNAKE_CASE_ : List[Any] = 0.01
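        # Sketch (assuming the mangled assignment targets mirror EncodecConfig fields, as in
        # the 32 kHz branch above): the 48 kHz model is stereo, swaps causal convolutions for
        # time_group_norm, normalizes inputs, and processes 1.0 s chunks with 0.01 overlap.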
else:
raise ValueError(f'Unknown model name: {model_name}' )
SCREAMING_SNAKE_CASE_ : Tuple = EncodecModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = torch.load(lowerCAmelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
SCREAMING_SNAKE_CASE_ : Optional[int] = original_checkpoint["best_state"]
recursively_load_weights(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCAmelCase )
model.push_to_hub(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 316
| 1
|
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _lowercase( __a : Optional[Any] ):
return EnvironmentCommand()
class lowercase_ (lowercase__ ):
@staticmethod
def __UpperCamelCase ( lowercase_) -> Optional[int]:
a__ =parser.add_parser('env')
download_parser.set_defaults(func=lowercase_)
def __UpperCamelCase ( self) -> Dict:
a__ =huggingface_hub.__version__
a__ ='not installed'
a__ ='NA'
if is_torch_available():
import torch
a__ =torch.__version__
a__ =torch.cuda.is_available()
a__ ='not installed'
if is_transformers_available():
import transformers
a__ =transformers.__version__
a__ ='not installed'
if is_accelerate_available():
import accelerate
a__ =accelerate.__version__
a__ ='not installed'
if is_xformers_available():
import xformers
a__ =xformers.__version__
a__ ={
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
print(self.format_dict(lowercase_))
return info
@staticmethod
def __UpperCamelCase ( lowercase_) -> List[Any]:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()]) + "\n"
| 20
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 'audio-spectrogram-transformer'
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=1e-12 , _lowerCAmelCase=16 , _lowerCAmelCase=True , _lowerCAmelCase=10 , _lowerCAmelCase=10 , _lowerCAmelCase=1024 , _lowerCAmelCase=128 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Tuple = initializer_range
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Optional[Any] = patch_size
UpperCAmelCase__ : Tuple = qkv_bias
UpperCAmelCase__ : Tuple = frequency_stride
UpperCAmelCase__ : Union[str, Any] = time_stride
UpperCAmelCase__ : Optional[Any] = max_length
UpperCAmelCase__ : Optional[int] = num_mel_bins
| 79
| 0
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
snake_case__ = False
class snake_case_( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case_( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Any = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : List[Any] = '''A painting of a squirrel eating a burger '''
lowerCAmelCase : List[str] = torch.manual_seed(0 )
lowerCAmelCase : int = pipe(
prompt=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCamelCase_ )
lowerCAmelCase : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = generator.manual_seed(0 )
lowerCAmelCase : Optional[int] = pipe(
prompt=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : List[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = '''A painting of a squirrel eating a burger '''
lowerCAmelCase : Any = torch.manual_seed(0 )
lowerCAmelCase : int = pipe(
prompt=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
lowerCAmelCase : Any = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase : Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 717
|
"""simple docstring"""
def _snake_case ( _snake_case : float , _snake_case : list[float] ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
lowerCAmelCase : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) )
return round(_snake_case , ndigits=2 )
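# Worked example (sketch): with discount_rate = 0.05 and cash_flows = [100.0, 100.0, 100.0]
# the sum is 100/1.05**0 + 100/1.05**1 + 100/1.05**2 = 285.94 after rounding to 2 digits.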
if __name__ == "__main__":
import doctest
doctest.testmod()
| 637
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE: Dict = random.Random()
def _a ( lowerCAmelCase , lowerCAmelCase=1.0 , lowerCAmelCase=None , lowerCAmelCase=None )-> str:
if rng is None:
SCREAMING_SNAKE_CASE_ = global_rng
SCREAMING_SNAKE_CASE_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
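# floats_list builds a shape[0] x shape[1] nested list of random floats scaled by `scale`;
# passing an explicit `rng` keeps the fixture deterministic, otherwise the module-level
# global_rng is used.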
@require_torch
@require_torchaudio
class lowercase_ (unittest.TestCase ):
def __init__( self : str , snake_case__ : Dict , snake_case__ : int=7 , snake_case__ : Any=4_00 , snake_case__ : int=20_00 , snake_case__ : Dict=10 , snake_case__ : Any=1_60 , snake_case__ : str=8 , snake_case__ : Tuple=0.0 , snake_case__ : int=40_00 , snake_case__ : int=False , snake_case__ : Dict=True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = return_attention_mask
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = chunk_length
SCREAMING_SNAKE_CASE_ = hop_length
def __a ( self : Optional[Any] ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __a ( self : Optional[int] , snake_case__ : int=False , snake_case__ : str=False ):
"""simple docstring"""
def _flatten(snake_case__ : str ):
return list(itertools.chain(*snake_case__ ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(snake_case__ ) for x in speech_inputs]
return speech_inputs
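# With equal_length=False the generated waveforms grow from min_seq_length to max_seq_length
# in steps of seq_length_diff, so both padding and truncation paths get exercised; numpify
# converts each Python list into an np.ndarray.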
@require_torch
@require_torchaudio
class lowercase_ (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowerCAmelCase__ =WhisperFeatureExtractor if is_speech_available() else None
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = WhisperFeatureExtractionTester(self )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = feat_extract_first.save_pretrained(snake_case__ )[0]
check_json_file_has_correct_format(snake_case__ )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE_ = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE_ = feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = os.path.join(snake_case__ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case__ )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class.from_json_file(snake_case__ )
SCREAMING_SNAKE_CASE_ = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE_ = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE_ = feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE_ = feat_extract_second.mel_filters
self.assertTrue(np.allclose(snake_case__ , snake_case__ ) )
self.assertEqual(snake_case__ , snake_case__ )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(snake_case__ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
SCREAMING_SNAKE_CASE_ = np.asarray(snake_case__ )
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
# Test truncation required
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(snake_case__ ) for speech_input in speech_inputs]
SCREAMING_SNAKE_CASE_ = [x[: feature_extractor.n_samples] for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = [np.asarray(snake_case__ ) for speech_input in speech_inputs_truncated]
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(snake_case__ , snake_case__ ):
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
def __a ( self : List[Any] ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = np.random.rand(1_00 , 32 ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __a ( self : Optional[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE_ = ds.sort('id' ).select(range(snake_case__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __a ( self : List[Any] ):
"""simple docstring"""
        # fmt: off
        SCREAMING_SNAKE_CASE_ = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = WhisperFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(snake_case__ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , snake_case__ , atol=1e-4 ) )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )[0]
SCREAMING_SNAKE_CASE_ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
SCREAMING_SNAKE_CASE_ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=snake_case__ )[0]
self.assertTrue(np.all(np.mean(snake_case__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(snake_case__ ) - 1 ) < 1e-3 ) )
| 360
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase_ :
@staticmethod
def __a ( *snake_case__ : List[Any] , **snake_case__ : List[Any] ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class lowercase_ (unittest.TestCase ):
lowerCAmelCase__ =MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = object_detector(examples[0] , threshold=0.0 )
SCREAMING_SNAKE_CASE_ = len(snake_case__ )
self.assertGreater(snake_case__ , 0 )
self.assertEqual(
snake_case__ , [
{
'score': ANY(snake_case__ ),
'label': ANY(snake_case__ ),
'box': {'xmin': ANY(snake_case__ ), 'ymin': ANY(snake_case__ ), 'xmax': ANY(snake_case__ ), 'ymax': ANY(snake_case__ )},
}
for i in range(snake_case__ )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@require_torch
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
SCREAMING_SNAKE_CASE_ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
SCREAMING_SNAKE_CASE_ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
pass
@require_torch
@slow
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0.2
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=snake_case__ , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=snake_case__ , )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
| 360
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE:
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 4 , lowerCamelCase__=32 * 6 , lowerCamelCase__=4 , lowerCamelCase__=32 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = is_training
__lowercase = use_auxiliary_loss
__lowercase = num_queries
__lowercase = num_channels
__lowercase = min_size
__lowercase = max_size
__lowercase = num_labels
__lowercase = mask_feature_size
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowercase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowercase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowercase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowercase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
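    # The test config wires a tiny Swin backbone (depth 1 per stage) to a small DETR decoder
    # (ffn dim 128, 2 attention heads) so the model stays lightweight for testing.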
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase = self.prepare_config_and_inputs()
__lowercase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
"""simple docstring"""
__lowercase = output.encoder_hidden_states
__lowercase = output.pixel_decoder_hidden_states
__lowercase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_config.decoder_layers )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[int]:
"""simple docstring"""
with torch.no_grad():
__lowercase = MaskFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowercase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness
        # of the encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

        comm_check_on_output(result)

        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
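# A minimal post-processing sketch (not part of the tests above). The method
# name follows the public MaskFormerImageProcessor API; the surrounding
# variable names are assumptions for illustration only:
#
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   with torch.no_grad():
#       outputs = model(**processor(image, return_tensors="pt"))
#   # one (height, width) semantic map per input image
#   semantic_map = processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]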
"""Generate the reflected binary Gray code sequence for a given bit count."""


def gray_code(bit_count: int) -> list:
    """Return the bit_count-bit Gray code sequence as a list of integers."""
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the bit_count-bit Gray code sequence as a list of bit strings."""
    # base cases for 0 and 1 bits
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to the first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
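    # Quick sanity checks; the expected values follow from the
    # reflect-and-prefix construction above (2-bit Gray code: 00, 01, 11, 10).
    assert gray_code(2) == [0, 1, 3, 2]
    assert gray_code_sequence_string(3) == ["000", "001", "011", "010", "110", "111", "101", "100"]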
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
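# A minimal usage sketch: get_activation maps a lowercase name to a torch
# nn.Module instance, exactly as the tests above exercise. The input shape
# here is arbitrary:
#
#   act = get_activation("gelu")
#   y = act(torch.randn(4, 8))  # applies GELU elementwise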
"""BlenderbotSmall model configuration"""
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
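# A minimal export-prep sketch (assuming the reconstructed names above,
# BlenderbotSmallConfig and BlenderbotSmallOnnxConfig; the checkpoint and the
# printed keys are illustrative, not verified output):
#
#   from transformers import AutoTokenizer
#
#   onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
#   tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   print(sorted(dummy))  # e.g. attention_mask, decoder_attention_mask, decoder_input_ids, input_ids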
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
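# A minimal interactive sketch (requires network access to the Hugging Face
# Hub; the expected output mirrors the parametrized cases above):
#
#   from datasets import get_dataset_split_names
#
#   get_dataset_split_names("squad", config_name="plain_text")
#   # -> ['train', 'validation']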
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials, each succeeding with probability `prob`."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
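    # Worked check (by hand): C(4, 2) * 0.75**2 * 0.25**2
    #   = 6 * 0.5625 * 0.0625 = 0.2109375
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12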