"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCAmelCase : Optional[Any] = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
__lowerCAmelCase : Optional[Any] = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Lists the ops in the saved model that are not covered by the given ONNX opset."""
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to a sorted list for deterministic output
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
__lowerCAmelCase : Optional[Any] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
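# Example invocation sketch (the script and model paths below are illustrative, not fixed by this repo):
#
#     python utils/check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict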
"""
A binary search tree whose nodes keep parent links, so deletion is straightforward.
"""
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: Any = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        """Return a string of all the nodes using an in-order traversal."""
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value: Any) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values: Any) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value: Any) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType attribute errors
            while node is not None and node.value != value:
                node = node.left if value < node.value else node.right
            return node
    def get_max(self, node: Node | None = None) -> Node | None:
        """Go as deep as possible down the right branch."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        while node.right is not None:
            node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """Go as deep as possible down the left branch."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        while node.left is not None:
            node = node.left
        return node
    def remove(self, value: Any) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """Traverse the tree with `traversal_function` if given, preorder otherwise."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        """Append the node values to `arr` using an in-order traversal."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree."""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
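# Quick sketch of find_kth_smallest (values chosen arbitrarily):
#
#     t = BinarySearchTree()
#     t.insert(8, 3, 10, 1)
#     t.find_kth_smallest(2, t.root)  # -> 3, since the sorted order is 1, 3, 8, 10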
def postorder(curr_node: Node | None) -> list[Node]:
    """Post-order traversal: left, right, self."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""GPT-NeoX model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
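# Minimal usage sketch (the small hyperparameters below are arbitrary, not a released checkpoint):
#
#     config = GPTNeoXConfig(hidden_size=768, num_attention_heads=12, rope_scaling={"type": "linear", "factor": 2.0})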
"""Whisper model configuration."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
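# Export-time sketch; the `processor` object below is an assumption
# (e.g. WhisperProcessor.from_pretrained("openai/whisper-base")):
#
#     from transformers.utils import TensorType
#     onnx_config = WhisperOnnxConfig(WhisperConfig())
#     dummy = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
#     # dummy holds "input_features" and "decoder_input_ids" shaped for the export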
"""PyTorch RegNet model."""
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    """Projects the residual features to the correct size and, if needed, downsamples the input with `stride=2`."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer (https://arxiv.org/abs/1709.01507)."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
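# Quick smoke-test sketch (a randomly initialized config; the 224x224 input size is illustrative):
#
#     config = RegNetConfig()
#     model = RegNetModel(config)
#     with torch.no_grad():
#         out = model(torch.randn(1, config.num_channels, 224, 224))
#     print(out.last_hidden_state.shape)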
"""
Preprocessing script before training a distilled model: extracts a subset of teacher layers
from a full RobertaForMaskedLM or GPT2LMHeadModel checkpoint.
"""
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
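    # The dump is a plain state dict. A student model with a matching number of layers can load it
    # before distillation training; `student` below is hypothetical:
    #
    #     student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)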
"""
wiki: https://en.wikipedia.org/wiki/Anagram
"""
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters,
    arranged differently (ignoring case and whitespace).
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
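# A few quick checks (inputs chosen arbitrarily):
#
#     assert check_anagrams("Silent", "Listen")
#     assert check_anagrams("This is a string", "Is this a string")
#     assert not check_anagrams("There", "Their")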
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowercase = input('''Enter the first string ''').strip()
__lowercase = input('''Enter the second string ''').strip()
__lowercase = check_anagrams(input_a, input_b)
print(f'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
"""Tests for the Flax logits processors and warpers."""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
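# Sketch of composing these processors outside the tests (variable names and values illustrative):
#
#     processor = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
#     scores = processor(input_ids, scores, cur_len=cur_len)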
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
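# Toy example (entries invented for illustration):
#
#     toc = [
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#     ]
#     clean_model_doc_toc(toc)
#     # -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]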
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality sub-section one by one
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
"""Integration tests for the `datasets` inspection utilities."""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
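# Interactive sketch (requires network access; "squad" is one of the datasets exercised above):
#
#     from datasets import get_dataset_split_names
#     get_dataset_split_names("squad")  # -> ["train", "validation"]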
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def A_( self , **kwargs ) -> str:
    """simple docstring"""
    kwargs.update(self.special_tokens_map )
    return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def A_( self , **kwargs ) -> List[str]:
    """simple docstring"""
    kwargs.update(self.special_tokens_map )
    return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def A_( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
input_text = '''lower newer'''
output_text = '''lower newer'''
return input_text, output_text
def A_( self ) -> Optional[int]:
"""simple docstring"""
tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = '''lower newer'''
bpe_tokens = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
tokens = tokenizer.tokenize(text , add_prefix_space=True )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def A_( self ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer(add_prefix_space=_snake_case )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
# Testing tokenization
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(_snake_case , add_prefix_space=_snake_case )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer(add_prefix_space=_snake_case )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_snake_case , add_prefix_space=_snake_case )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
# Testing the unknown token
SCREAMING_SNAKE_CASE_ = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def A_( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
pass
def A_( self , SCREAMING_SNAKE_CASE=15 ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
# Simple input
SCREAMING_SNAKE_CASE_ = '''This is a simple input'''
SCREAMING_SNAKE_CASE_ = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE_ = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE_ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='max_length' )
# Simple input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='max_length' )
# Simple input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='max_length' , )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='max_length' )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='max_length' )
# Pair input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='max_length' , )
def A_( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
SCREAMING_SNAKE_CASE_ = '''This is a simple input'''
SCREAMING_SNAKE_CASE_ = ['''This is a simple input looooooooong''', '''This is a simple input''']
SCREAMING_SNAKE_CASE_ = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE_ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
SCREAMING_SNAKE_CASE_ = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ = tokenizer(_snake_case , padding='max_length' , max_length=30 , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = tokenizer(_snake_case , padding=_snake_case , truncate=_snake_case , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = tokenizer(*_snake_case , padding='max_length' , max_length=60 , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = tokenizer(_snake_case , padding=_snake_case , truncate=_snake_case , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def A_( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''$$$'''
SCREAMING_SNAKE_CASE_ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_snake_case , add_bos_token=_snake_case )
SCREAMING_SNAKE_CASE_ = '''This is a simple input'''
SCREAMING_SNAKE_CASE_ = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE_ = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE_ = tokenizer(_snake_case )
SCREAMING_SNAKE_CASE_ = tokenizer(_snake_case )
self.assertEqual(out_s.input_ids[0] , _snake_case )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _snake_case )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def A_( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
SCREAMING_SNAKE_CASE_ = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
SCREAMING_SNAKE_CASE_ = '''\nif len_a > len_b: result = a\nelse: result = b'''
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_snake_case )
SCREAMING_SNAKE_CASE_ = ['''^#''', re.escape('<|endoftext|>' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
SCREAMING_SNAKE_CASE_ = tokenizer.decode(_snake_case , truncate_before_pattern=_snake_case )
self.assertEqual(_snake_case , _snake_case )
def A_( self ) -> Any:
"""simple docstring"""
pass
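# For reference: the slow test above exercises decode(..., truncate_before_pattern=...),
# which cuts the decoded string at the first regex match (e.g. a trailing
# '\n\n\n' run or a '^#' comment line) so generated code stops cleanly.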
| 205 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
model = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
model = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
vqvae = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
unet = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
    device = torch_device
    pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
    pipe = pipe.to(device )
    pipe.set_progress_bar_config(disable=None )
    generator = torch.Generator(device=device ).manual_seed(42 )
    output = pipe(generator=generator )
    audio = output.audios[0]
    image = output.images[0]
    assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
    assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
    image_slice = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
    expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
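# Note on the shape assertions in this file: the pipeline renders a
# mel-spectrogram image whose width is unet.config.sample_size[1]; Mel then
# converts it back to a waveform of length (width - 1) * hop_length, which is
# exactly what the audio.shape asserts check.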
| 683 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
cset = [[1, 2, 4], [1, 2, 3, 4]]
dc = DisjunctiveConstraint(cset )
self.assertTrue(isinstance(dc.token_ids , list ) )
with self.assertRaises(ValueError ):
    DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(ValueError ):
    DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ):
# We can't have constraints that are complete subsets of another. This leads to a preverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
cset = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(ValueError ):
    DisjunctiveConstraint(cset )  # fails here
def _UpperCAmelCase ( self ):
cset = [[1, 2, 3], [1, 2, 4]]
dc = DisjunctiveConstraint(cset )
stepped, completed, reset = dc.update(1 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
stepped, completed, reset = dc.update(2 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
stepped, completed, reset = dc.update(3 )
desired = stepped is True and completed is True and reset is False
self.assertTrue(desired )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
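# Compact trace of the state machine checked above: update(1) advances both
# branches (current_seq=[1]); update(2) keeps both alive ([1, 2]); update(3)
# commits to the [1, 2, 3] branch and flips `completed` to True.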
def _UpperCAmelCase ( self ):
cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
dc = DisjunctiveConstraint(cset )
UpperCAmelCase__: Tuple = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase__: List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase__: Union[str, Any] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase__: List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase__: Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase__: int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase__: Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] ) | 113 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longt5"""] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_longt5"""] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
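# With this pattern, `import transformers.models.longt5` stays cheap: the
# _LazyModule registered in sys.modules only performs the heavy
# `modeling_longt5` (torch) or `modeling_flax_longt5` (flax) import the first
# time an attribute such as LongT5Model is actually accessed.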
| 683 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_A : Optional[Any] = logging.get_logger(__name__)
def shape_list( tensor ) -> List[int]:
    """simple docstring"""
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
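# Example: for a tensor with static shape (None, 128), shape_list returns
# [<scalar tf.Tensor for the batch dim>, 128]: static dims stay plain ints,
# unknown dims are looked up dynamically, so the result is safe in graph mode.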
def stable_softmax( logits , axis = None , name = None ) -> tf.Tensor:
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def functional_layernorm( inputs , weight , bias , epsilon=1E-5 , axis=-1 ) -> Any:
    """simple docstring"""
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
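# Hedged sanity-check sketch (eager mode; shapes are illustrative):
#   x = tf.random.normal((2, 4)); w = tf.ones(4); b = tf.zeros(4)
#   functional_layernorm(x, w, b, axis=-1)
#   # matches torch.nn.functional.layer_norm(x, (4,), w, b) up to epsilon handling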
def flatten( input , start_dim=0 , end_dim=-1 ) -> Optional[int]:
    """simple docstring"""
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
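# Example: flatten(tf.zeros((2, 3, 4)), start_dim=1) collapses dims 1..-1 into
# one, yielding shape (2, 12), mirroring torch.flatten semantics.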
def invert_attention_mask( encoder_attention_mask ) -> tf.Tensor:
    """simple docstring"""
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
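# The returned mask is additive: positions to keep (mask == 1) become 0.0 and
# positions to drop become dtype.min, so it can simply be added to raw
# attention scores of shape [batch, heads, query_len, key_len] before softmax.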
def check_embeddings_within_bounds( tensor , embed_dim , tensor_name = "input_ids" ) -> None:
    """simple docstring"""
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding "
            f"layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ) , )
def save_attributes_to_hdf5_group( group , name , data ) -> int:
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}" )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group( group , name ) -> int:
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d( data ) -> Union[str, Any]:
    """simple docstring"""
    def _expand_single_ad_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor , data )
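# Hedged round-trip sketch for the HDF5 attribute helpers above (assumes h5py
# is installed; chunking only kicks in past the 64512-byte header limit):
#   import h5py
#   with h5py.File('''demo.h5''' , '''w''' ) as f:
#       save_attributes_to_hdf5_group(f , '''layer_names''' , ['''dense_1''', '''dense_2'''] )
#       assert load_attributes_from_hdf5_group(f , '''layer_names''' ) == ['''dense_1''', '''dense_2''']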
| 315 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Tuple = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> int:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def _lowercase ( self , token_ids_0 , token_ids_1=None ) -> Optional[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def _lowercase ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def _lowercase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
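# The two helpers above implement the classic BERT-style layout:
#   single sequence: [CLS] A [SEP]          -> token type ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over [CLS] A [SEP], 1s over B [SEP]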
| 683 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_lowerCAmelCase: Dict = trt.Logger(trt.Logger.WARNING)
_lowerCAmelCase: Union[str, Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_lowerCAmelCase: Optional[Any] = logging.getLogger(__name__)
_lowerCAmelCase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
_lowerCAmelCase: Dict = parser.parse_args()
if args.tokenizer_name:
_lowerCAmelCase: Any = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
_lowerCAmelCase: str = args.per_device_eval_batch_size
_lowerCAmelCase: Optional[Any] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_lowerCAmelCase: List[str] = True
_lowerCAmelCase: Tuple = """temp_engine/bert-fp32.engine"""
if args.fpaa:
_lowerCAmelCase: Optional[Any] = """temp_engine/bert-fp16.engine"""
if args.inta:
_lowerCAmelCase: str = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
_lowerCAmelCase: Dict = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_lowerCAmelCase: str = [network.get_input(i) for i in range(network.num_inputs)]
_lowerCAmelCase: Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_lowerCAmelCase: int = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
_lowerCAmelCase: int = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_lowerCAmelCase: Optional[int] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def _lowercase( __a : Any , __a : str , __a : Any , __a : Tuple , __a : int , __a : Union[str, Any] , __a : Optional[int] , __a : str ):
a__ =np.asarray(inputs['input_ids'] , dtype=np.intaa )
a__ =np.asarray(inputs['attention_mask'] , dtype=np.intaa )
a__ =np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __a )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __a )
# start time
a__ =time.time()
# Run inference
context.execute_async(
bindings=[int(__a ) for d_inp in d_inputs] + [int(__a ), int(__a )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__a , __a , __a )
cuda.memcpy_dtoh_async(__a , __a , __a )
# Synchronize the stream and take time
stream.synchronize()
# end time
a__ =time.time()
a__ =end_time - start_time
a__ =(h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
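# The function above is the canonical TensorRT async loop: queue host->device
# copies, launch execute_async on the same CUDA stream, queue device->host
# copies, then synchronize once so the timed window covers exactly one
# inference.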
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_lowerCAmelCase: Any = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCAmelCase: Optional[int] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
_lowerCAmelCase: Dict = raw_datasets["""validation"""].column_names
_lowerCAmelCase: List[Any] = """question""" if """question""" in column_names else column_names[0]
_lowerCAmelCase: int = """context""" if """context""" in column_names else column_names[1]
_lowerCAmelCase: Any = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_lowerCAmelCase: Any = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
_lowerCAmelCase: Optional[Any] = min(args.max_seq_length, tokenizer.model_max_length)
def _lowercase( __a : List[Any] ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
# truncation of the context fail (the tokenized question will take a lots of space). So we remove that
# left whitespace
a__ =[q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
a__ =tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=__a , stride=args.doc_stride , return_overflowing_tokens=__a , return_offsets_mapping=__a , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
a__ =tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
a__ =[]
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
a__ =tokenized_examples.sequence_ids(__a )
a__ =1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
a__ =sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
a__ =[
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
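# Net effect of the mapping above: one long SQuAD context fans out into several
# overlapping features (window shift = args.doc_stride); example_id plus the
# pruned offset_mapping are kept so span predictions can later be projected
# back onto the original context text.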
_lowerCAmelCase: int = raw_datasets["""validation"""]
# Validation Feature Creation
_lowerCAmelCase: List[str] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
_lowerCAmelCase: Any = default_data_collator
_lowerCAmelCase: Any = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
_lowerCAmelCase: Dict = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def _lowercase( __a : List[Any] , __a : Optional[Any] , __a : Tuple , __a : Optional[Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
a__ =postprocess_qa_predictions(
examples=__a , features=__a , predictions=__a , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__a , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
a__ =[
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
a__ =[{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
a__ =[{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__a , label_ids=__a )
_lowerCAmelCase: List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def _lowercase( __a : Optional[Any] ):
return trt.volume(engine.get_binding_shape(__a ) ) * engine.get_binding_dtype(__a ).itemsize
# Allocate device memory for inputs and outputs.
_lowerCAmelCase: Any = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
_lowerCAmelCase: Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
_lowerCAmelCase: List[Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
_lowerCAmelCase: str = cuda.mem_alloc(h_outputa.nbytes)
_lowerCAmelCase: Dict = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_lowerCAmelCase: str = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
_lowerCAmelCase: Optional[int] = 0.0
_lowerCAmelCase: Dict = 0
_lowerCAmelCase: Union[str, Any] = timeit.default_timer()
_lowerCAmelCase: Optional[int] = None
for step, batch in enumerate(eval_dataloader):
_lowerCAmelCase: Optional[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
_lowerCAmelCase: Any = outputs
_lowerCAmelCase: Optional[int] = torch.tensor(start_logits)
_lowerCAmelCase: Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_lowerCAmelCase: Dict = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
_lowerCAmelCase: Tuple = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
_lowerCAmelCase: int = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_lowerCAmelCase: str = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
_lowerCAmelCase: Tuple = nested_truncate(all_preds, len(eval_dataset))
_lowerCAmelCase: int = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
_lowerCAmelCase: Tuple = post_processing_function(eval_examples, eval_dataset, all_preds)
_lowerCAmelCase: str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 20 |
'''simple docstring'''
def odd_even_sort( input_list ) -> list:
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 ,len(input_list ) - 1 ,2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 ,len(input_list ) - 1 ,2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
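# Worked example: [3, 1, 2] -> even pass swaps positions 0/1 -> [1, 3, 2];
# odd pass swaps positions 1/2 -> [1, 2, 3]; a final clean sweep with no swaps
# leaves is_sorted True and ends the while loop.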
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 683 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a__ : str =logging.get_logger(__name__)
a__ : Any ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a__ : Dict ={
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
a__ : int ={
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
a__ : List[str] ={
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class snake_case ( PreTrainedTokenizerFast ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Tuple =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : Optional[int] =PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : List[Any] =SqueezeBertTokenizer
    def __init__( self : List[str] , vocab_file : Optional[int]=None , tokenizer_file : Optional[int]=None , do_lower_case : Optional[Any]=True , unk_token : List[str]="[UNK]" , sep_token : Optional[Any]="[SEP]" , pad_token : Any="[PAD]" , cls_token : Tuple="[CLS]" , mask_token : Dict="[MASK]" , tokenize_chinese_chars : Any=True , strip_accents : List[Any]=None , **kwargs : str , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def _lowerCamelCase ( self : str , token_ids_0 : List[str] , token_ids_1 : Optional[Any]=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def _lowerCamelCase ( self : Any , token_ids_0 : List[str] , token_ids_1 : str = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def _lowerCamelCase ( self : Tuple , save_directory : List[Any] , filename_prefix : Optional[Any] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 399 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint ,config ) -> Union[str, Any]:
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
    new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
    new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
    new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
    new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
    new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
    new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
    new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
    new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
    new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
    new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
    new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
    new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
    new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
    new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
    new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]:
    # Only supports V1
    _UpperCamelCase : Tuple = requests.get(
        '''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
_UpperCamelCase : List[Any] = io.BytesIO(r.content )
_UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase )
_UpperCamelCase : str = 5_12
_UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
_UpperCamelCase : str = {}
with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
_UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase )
else:
_UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict''']
# Convert the VAE model.
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase )
_UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase )
vae.load_state_dict(UpperCamelCase )
vae.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
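# Example invocation (illustrative; the script filename and both paths are hypothetical):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers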
| 683 | 0 |
'''simple docstring'''
from copy import deepcopy
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ) -> None:
if arr is None and size is not None:
lowerCAmelCase__ : Optional[int] = size
lowerCAmelCase__ : Dict = [0] * size
elif arr is not None:
self.init(_snake_case )
else:
raise ValueError("""Either arr or size must be specified""" )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None:
lowerCAmelCase__ : int = len(_snake_case )
lowerCAmelCase__ : List[str] = deepcopy(_snake_case )
for i in range(1 ,self.size ):
lowerCAmelCase__ : str = self.next_(_snake_case )
if j < self.size:
self.tree[j] += self.tree[i]
def UpperCAmelCase_ ( self ) -> list[int]:
lowerCAmelCase__ : Tuple = self.tree[:]
for i in range(self.size - 1 ,0 ,-1 ):
lowerCAmelCase__ : Optional[Any] = self.next_(_snake_case )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def UpperCAmelCase_ ( __UpperCAmelCase ) -> int:
return index + (index & (-index))
@staticmethod
def UpperCAmelCase_ ( __UpperCAmelCase ) -> int:
return index - (index & (-index))
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCAmelCase__ : Union[str, Any] = self.next_(_snake_case )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
self.add(_snake_case ,value - self.get(_snake_case ) )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
if right == 0:
return 0
lowerCAmelCase__ : Dict = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCAmelCase__ : str = self.prev(_snake_case )
return result
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
return self.prefix(_snake_case ) - self.prefix(_snake_case )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
return self.query(_snake_case ,index + 1 )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int:
value -= self.tree[0]
if value < 0:
return -1
lowerCAmelCase__ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCAmelCase__ : Optional[int] = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
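# Minimal standalone sketch of the two Fenwick-tree primitives the class above is
# built on (`fenwick_add`/`fenwick_prefix` are illustrative names; 1-indexed, tree[0] unused):
def fenwick_add(tree: list, index: int, value: int) -> None:
    while index < len(tree):
        tree[index] += value
        index += index & (-index)  # jump to the next node covering this index
def fenwick_prefix(tree: list, index: int) -> int:
    total = 0
    while index > 0:
        total += tree[index]
        index -= index & (-index)  # drop the lowest set bit
    return total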
| 565 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
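# Usage sketch (illustrative): the processor routes `text` to the XLM-R tokenizer and
# `images` to the CLIP image processor, merging both results into one BatchEncoding:
#   batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   # -> input_ids, attention_mask and pixel_values in a single encoding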
| 683 | 0 |
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = len(lowerCamelCase__ )
lowerCAmelCase__ = len(matrix[0] )
lowerCAmelCase__ = min(lowerCamelCase__ , lowerCamelCase__ )
for row in range(lowerCamelCase__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , lowerCamelCase__ ):
lowerCAmelCase__ = matrix[col][row] / matrix[row][row]
for i in range(lowerCamelCase__ , lowerCamelCase__ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
lowerCAmelCase__ = True
for i in range(row + 1 , lowerCamelCase__ ):
if matrix[i][row] != 0:
lowerCAmelCase__ = matrix[i], matrix[row]
lowerCAmelCase__ = False
break
if reduce:
rank -= 1
for i in range(lowerCamelCase__ ):
lowerCAmelCase__ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
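# Worked example (illustrative): eliminating below the first pivot turns
# [[1, 2, 3], [4, 5, 6]] into [[1, 2, 3], [0, -3, -6]], leaving two independent
# rows, so the rank is 2.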
| 644 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # if a box's height or width is below this scale, drop it.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case__ ( UpperCamelCase ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
'''simple docstring'''
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __UpperCAmelCase ( a_ ):
__A : Tuple = 'microsoft/speecht5_tts'
__A : Dict = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
__A : str = 'text_reader'
__A : List[Any] = SpeechTaProcessor
__A : Any = SpeechTaForTextToSpeech
__A : List[Any] = SpeechTaHifiGan
__A : str = ['text']
__A : str = ['audio']
def UpperCAmelCase_ ( self ):
if self.post_processor is None:
lowerCAmelCase_ = '''microsoft/speecht5_hifigan'''
super().setup()
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None ):
lowerCAmelCase_ = self.pre_processor(text=_snake_case , return_tensors='''pt''' , truncation=_snake_case )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
lowerCAmelCase_ = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
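            # index 7305 picks one fixed speaker x-vector from the CMU Arctic set as the default voice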
lowerCAmelCase_ = torch.tensor(embeddings_dataset[7305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase_ ( self , _lowerCamelCase ):
with torch.no_grad():
return self.model.generate_speech(**_snake_case )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
with torch.no_grad():
return self.post_processor(_snake_case ).cpu().detach()
| 274 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
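            # mask pad tokens in the labels with -100 so the cross-entropy loss ignores them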
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 0 |
'''simple docstring'''
import numpy as np
from PIL import Image
def __snake_case ( _UpperCAmelCase : Optional[int], _UpperCAmelCase : Any, _UpperCAmelCase : int):
UpperCamelCase = np.array(_UpperCAmelCase)
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''')
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
# compute the shape of the output matrix
UpperCamelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
UpperCamelCase = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
            # compute the maximum of the current pooling window
UpperCamelCase = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase = 0
UpperCamelCase = 0
return updated_arr
def __snake_case ( _UpperCAmelCase : Tuple, _UpperCAmelCase : List[str], _UpperCAmelCase : List[str]):
UpperCamelCase = np.array(_UpperCAmelCase)
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''')
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
# compute the shape of the output matrix
UpperCamelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
UpperCamelCase = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
            # compute the average of the current pooling window
UpperCamelCase = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
UpperCamelCase = 0
UpperCamelCase = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
snake_case_ : Optional[Any] = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
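# Worked example: maxpooling with size=2, stride=2 keeps one maximum per
# non-overlapping 2x2 block, e.g.
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
#   -> [[ 6.,  8.], [14., 16.]]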
| 212 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
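# Typical invocation via the Accelerate CLI (the flag is defined above):
#   accelerate env --config_file path/to/config.yaml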
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
__lowercase = list[tuple[int, int]]
__lowercase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = pos_x
lowerCAmelCase = pos_y
lowerCAmelCase = (pos_y, pos_x)
lowerCAmelCase = goal_x
lowerCAmelCase = goal_y
lowerCAmelCase = g_cost
lowerCAmelCase = parent
lowerCAmelCase = self.calculate_heuristic()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = abs(self.pos_x - self.goal_x)
lowerCAmelCase = abs(self.pos_y - self.goal_y)
return dx + dy
def __lt__( self , __lowerCAmelCase):
"""simple docstring"""
return self.f_cost < other.f_cost
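# Note: f_cost above is just the Manhattan heuristic, so the open list is ordered
# purely by estimated distance to the goal (greedy best-first); unlike A*, g_cost
# does not affect the ordering, so the returned path is not guaranteed optimal.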
class a__:
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _snake_case)
lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , _snake_case)
lowerCAmelCase = [self.start]
lowerCAmelCase = []
lowerCAmelCase = False
def a_ ( self):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
lowerCAmelCase = True
return self.retrace_path(_snake_case)
self.closed_nodes.append(_snake_case)
lowerCAmelCase = self.get_successors(_snake_case)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_snake_case)
else:
# retrieve the best current path
lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(_snake_case))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_snake_case)
else:
self.open_nodes.append(_snake_case)
if not self.reached:
return [self.start.pos]
return None
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = []
for action in delta:
lowerCAmelCase = parent.pos_x + action[1]
lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(_snake_case) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _snake_case , ))
return successors
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = node
lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
lowerCAmelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__lowercase = (0, 0)
__lowercase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__lowercase = GreedyBestFirst(init, goal)
__lowercase = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__lowercase = 2
for elem in grid:
print(elem)
| 370 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def snake_case__ ( UpperCamelCase ) -> Tuple:
_UpperCamelCase : str = '''huggingface/label-files'''
_UpperCamelCase : Optional[Any] = '''imagenet-1k-id2label.json'''
_UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase ,UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
_UpperCamelCase : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=UpperCamelCase ,num_labels=10_00 ,idalabel=UpperCamelCase ,labelaid=UpperCamelCase ,)
return config
def snake_case__ ( UpperCamelCase ) -> str:
if "stem.conv" in name:
_UpperCamelCase : Any = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase : Union[str, Any] = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
_UpperCamelCase : Optional[Any] = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
_UpperCamelCase : Any = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
_UpperCamelCase : List[Any] = '''bit.encoder.''' + name
return name
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase : List[str] = Image.open(requests.get(UpperCamelCase ,stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[Any]:
_UpperCamelCase : str = get_config(UpperCamelCase )
# load original model from timm
_UpperCamelCase : int = create_model(UpperCamelCase ,pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
_UpperCamelCase : int = timm_model.state_dict()
for key in state_dict.copy().keys():
_UpperCamelCase : int = state_dict.pop(UpperCamelCase )
_UpperCamelCase : Any = val.squeeze() if '''head''' in key else val
# load HuggingFace model
_UpperCamelCase : List[str] = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
_UpperCamelCase : Optional[int] = create_transform(**resolve_data_config({} ,model=UpperCamelCase ) )
_UpperCamelCase : Any = transform.transforms
_UpperCamelCase : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase : List[str] = BitImageProcessor(
do_resize=UpperCamelCase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=UpperCamelCase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Dict = transform(UpperCamelCase ).unsqueeze(0 )
_UpperCamelCase : Dict = processor(UpperCamelCase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase ,UpperCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(UpperCamelCase )
_UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
_UpperCamelCase : List[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
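# Example invocation (illustrative; the script filename is hypothetical):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-base --push_to_hub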
| 683 | 0 |
from __future__ import annotations
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[Any]:
a_ : int = text, pattern
a_ : List[Any] = len(_snake_case ), len(_snake_case )
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def SCREAMING_SNAKE_CASE ( self : int ) -> list[int]:
# searches pattern in text and returns index positions
a_ : List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
a_ : Optional[Any] = self.mismatch_in_text(_snake_case )
if mismatch_index == -1:
positions.append(_snake_case )
else:
a_ : Tuple = self.match_in_pattern(self.text[mismatch_index] )
a_ : Union[str, Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
UpperCAmelCase_ : Optional[Any] = """ABAABA"""
UpperCAmelCase_ : int = """AB"""
UpperCAmelCase_ : Dict = BoyerMooreSearch(text, pattern)
UpperCAmelCase_ : str = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
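# Expected output for text "ABAABA" and pattern "AB" (the pattern starts at
# indices 0 and 3):
#   Pattern found in following positions:
#   [0, 3]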
| 570 |
'''simple docstring'''
_UpperCAmelCase : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def snake_case__ ( UpperCamelCase ) -> int:
_UpperCamelCase : Any = 0
while number:
        # Speed is improved slightly by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
_UpperCAmelCase : list[bool | None] = [None] * 10000000
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = False
def snake_case__ ( UpperCamelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCamelCase : List[str] = chain(next_number(UpperCamelCase ) )
_UpperCamelCase : Tuple = number_chain
while number < 10_00_00_00:
_UpperCamelCase : int = number_chain
number *= 10
return number_chain
def snake_case__ ( UpperCamelCase = 10_00_00_00 ) -> int:
for i in range(1 ,UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : int = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ : Dict = {
"""albert-base-v1""": 5_12,
"""albert-large-v1""": 5_12,
"""albert-xlarge-v1""": 5_12,
"""albert-xxlarge-v1""": 5_12,
"""albert-base-v2""": 5_12,
"""albert-large-v2""": 5_12,
"""albert-xlarge-v2""": 5_12,
"""albert-xxlarge-v2""": 5_12,
}
SCREAMING_SNAKE_CASE__ : Tuple = """▁"""
class a_ ( a_ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = AlbertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case , normalized=_snake_case )
if isinstance(_snake_case , _snake_case )
else mask_token
)
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
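# Illustrative usage (the class above corresponds to AlbertTokenizerFast in
# `transformers`; the checkpoint name is one of the vocab-map entries above):
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tokenizer("A sentence to tokenize.")["input_ids"]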
| 205 |
'''simple docstring'''
_UpperCAmelCase : str = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_UpperCAmelCase : List[str] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> str:
assert len(str(UpperCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_UpperCamelCase : Any = year // 1_00
_UpperCamelCase : List[Any] = (5 * (century % 4) + 2) % 7
_UpperCamelCase : Tuple = year % 1_00
_UpperCamelCase : Optional[int] = centurian % 12
_UpperCamelCase : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_UpperCamelCase : List[Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_UpperCamelCase : Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
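# Worked example: for 2023-01-01, century_anchor = (5 * (20 % 4) + 2) % 7 = 2,
# dooms_day = (1 + 11 + 2 + 2) % 7 = 2 (Tuesday), day_anchor = 3 (non-leap January),
# so the week day is (2 + 1 - 3) % 7 = 0 -> "Sunday".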
| 683 | 0 |
from __future__ import annotations
_lowerCAmelCase : Union[str, Any] =[True] * 1_00_00_01
_lowerCAmelCase : Optional[Any] =2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
_lowerCAmelCase : List[Any] =False
i += 1
def _A ( SCREAMING_SNAKE_CASE ):
return seive[n]
def _A ( SCREAMING_SNAKE_CASE ):
return any(digit in "02468" for digit in str(SCREAMING_SNAKE_CASE ) )
def _A ( SCREAMING_SNAKE_CASE = 1_0_0_0_0_0_0 ):
UpperCAmelCase__: List[Any] = [2] # result already includes the number 2.
for num in range(3 ,limit + 1 ,2 ):
if is_prime(SCREAMING_SNAKE_CASE ) and not contains_an_even_digit(SCREAMING_SNAKE_CASE ):
UpperCAmelCase__: Union[str, Any] = str(SCREAMING_SNAKE_CASE )
UpperCAmelCase__: str = [int(str_num[j:] + str_num[:j] ) for j in range(len(SCREAMING_SNAKE_CASE ) )]
if all(is_prime(SCREAMING_SNAKE_CASE ) for i in list_nums ):
result.append(SCREAMING_SNAKE_CASE )
return result
def _A ( ):
return len(find_circular_primes() )
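# Worked example: 197 is a circular prime because every rotation (197, 971, 719)
# is prime; below one hundred there are exactly 13 circular primes.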
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }") | 113 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[int] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[Any] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
| 683 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class __SCREAMING_SNAKE_CASE ( a_ ):
_UpperCAmelCase : str = field(default="summarization" ,metadata={"include_in_asdict_even_if_is_default": True} )
_UpperCAmelCase : ClassVar[Features] = Features({"text": Value("string" )} )
_UpperCAmelCase : ClassVar[Features] = Features({"summary": Value("string" )} )
_UpperCAmelCase : str = "text"
_UpperCAmelCase : str = "summary"
@property
def __lowerCamelCase ( self : Dict ) ->Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
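# Illustrative: column_mapping tells `datasets` how to rename a corpus's own
# columns to the canonical schema, e.g. a dataset with (hypothetical) columns
# ("article", "abstract") would use text_column="article", summary_column="abstract".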
| 315 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_UpperCAmelCase : Tuple = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
def _lowercase( ):
a__ =[]
a__ =1
while len(__a ) < 1e6:
constant.append(str(__a ) )
i += 1
a__ =''''''.join(__a )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
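# The digits of Champernowne's constant picked above are d_1=1, d_10=1, d_100=5,
# d_1000=3, d_10000=7, d_100000=2, d_1000000=1, so the product is 210.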
| 20 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
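# Note: T5X stores attention kernels with an explicit head axis, e.g. key/query/value
# as (d_model, n_heads, d_head); the reshapes above flatten the head axes so the
# weights fit PyTorch's merged (d_model, n_heads * d_head) projections (and the
# transposed layout for the output projection).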
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
_UpperCamelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
_UpperCamelCase : int = MTaConfig.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCamelCase : Optional[int] = UMTaEncoderModel(UpperCamelCase )
else:
_UpperCamelCase : Optional[int] = UMTaForConditionalGeneration(UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
_UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 683 | 0 |
'''simple docstring'''
from manim import *
class snake_case ( a_ ):
"""simple docstring"""
def _lowerCamelCase ( self : int ):
__UpperCamelCase = Rectangle(height=0.5 , width=0.5 )
__UpperCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__UpperCamelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__UpperCamelCase = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
__UpperCamelCase = Text('CPU' , font_size=2_4 )
__UpperCamelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
__UpperCamelCase = [mem.copy() for i in range(4 )]
__UpperCamelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__UpperCamelCase = Text('GPU' , font_size=2_4 )
__UpperCamelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.move_to([-1, -1, 0] )
self.add(_snake_case )
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__UpperCamelCase = Text('Model' , font_size=2_4 )
__UpperCamelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.add(_snake_case )
__UpperCamelCase = []
for i, rect in enumerate(_snake_case ):
rect.set_stroke(_snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__UpperCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_snake_case , buff=0.0 )
self.add(_snake_case )
cpu_targs.append(_snake_case )
__UpperCamelCase = [mem.copy() for i in range(6 )]
__UpperCamelCase = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__UpperCamelCase = Text('Loaded Checkpoint' , font_size=2_4 )
__UpperCamelCase = Group(_snake_case , _snake_case ).arrange(_snake_case , aligned_edge=_snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCamelCase = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_snake_case , _snake_case )
__UpperCamelCase = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(_snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__UpperCamelCase = MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case ) , Write(_snake_case ) )
self.play(Write(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) )
__UpperCamelCase = []
__UpperCamelCase = []
for i, rect in enumerate(_snake_case ):
__UpperCamelCase = fill.copy().set_fill(_snake_case , opacity=0.7 )
target.move_to(_snake_case )
first_animations.append(GrowFromCenter(_snake_case , run_time=1 ) )
__UpperCamelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(*_snake_case )
self.wait()
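# A scene like this is typically previewed with the Manim CLI, e.g.
# `manim -pql scene.py SceneName` (the file and scene names here are placeholders).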
| 399 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
_UpperCAmelCase : int = 100
_UpperCAmelCase : List[Any] = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_UpperCAmelCase : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def snake_case__ ( UpperCamelCase ) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
_UpperCamelCase : set[int] = set()
_UpperCamelCase : int
_UpperCamelCase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
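# By unique factorization, each multiset of primes summing to n yields a distinct
# product, so len(partition(n)) counts the prime partitions of n (Project Euler 77).
# Example: partition(7) == {7, 10, 12}, from the partitions 7, 2+5 and 2+2+3.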
def snake_case__ ( UpperCamelCase = 50_00 ) -> int | None:
for number_to_partition in range(1 ,UpperCamelCase ):
if len(partition(UpperCamelCase ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 683 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , **UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = AutoConfig.from_pretrained(UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : Optional[int] = AutoModelForSeqaSeqLM.from_config(UpperCamelCase )
model.save_pretrained(UpperCamelCase )
AutoTokenizer.from_pretrained(UpperCamelCase ).save_pretrained(UpperCamelCase )
return model
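# python-fire exposes the function's parameters as CLI arguments; a hypothetical
# invocation: `python save_random.py t5-small ./t5-small-random` (script name assumed).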
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 565 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_UpperCAmelCase : Dict = """bart"""
_UpperCAmelCase : List[str] = True
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> int:
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> List[Any]:
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCamelCase )
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_indexes()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = load_models()
_UpperCAmelCase , _UpperCAmelCase : int = load_train_data()
def snake_case__ ( UpperCamelCase ,UpperCamelCase=10 ) -> Optional[Any]:
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
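# Max-inner-product search over the precomputed question embeddings returns the
# indices of the closest ELI5 training questions.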
_UpperCamelCase : Optional[Any] = [elia_train[int(i )] for i in I[0]]
return nn_examples
def snake_case__ ( UpperCamelCase ,UpperCamelCase="wiki40b" ,UpperCamelCase="dense" ,UpperCamelCase=10 ) -> Optional[int]:
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
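# Pack the query and the retrieved passages into a single
# "question: ... context: ..." string for the seq2seq answer generator.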
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=64 ,UpperCamelCase=2_56 ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=0.95 ,UpperCamelCase=0.8 ) -> Optional[Any]:
with torch.no_grad():
_UpperCamelCase : Any = qa_sas_generate(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,num_answers=1 ,num_beams=UpperCamelCase ,min_len=UpperCamelCase ,max_len=UpperCamelCase ,do_sample=UpperCamelCase ,temp=UpperCamelCase ,top_p=UpperCamelCase ,top_k=UpperCamelCase ,max_input_length=10_24 ,device='''cuda:0''' ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 683 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : str = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase : Tuple = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
__lowerCAmelCase : List[str] = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
__lowerCAmelCase : List[Any] = """▁"""
# Segments (not really needed)
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[Any] = 2
__lowerCAmelCase : str = 3
__lowerCAmelCase : str = 4
class a_ ( a_ ):
UpperCamelCase_ : int = VOCAB_FILES_NAMES
UpperCamelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[str] = 'left'
UpperCamelCase_ : List[Any] = XLNetTokenizer
def __init__( self : Dict , snake_case__ : int=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[Any]=False , snake_case__ : Dict=True , snake_case__ : Optional[int]=False , snake_case__ : List[Any]="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : int="<unk>" , snake_case__ : Any="<sep>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : Union[str, Any]="<cls>" , snake_case__ : int="<mask>" , snake_case__ : Any=["<eop>", "<eod>"] , **snake_case__ : Union[str, Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
vocab_file=_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
lowerCAmelCase__ = 3
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = remove_space
lowerCAmelCase__ = keep_accents
lowerCAmelCase__ = vocab_file
lowerCAmelCase__ = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str = None ):
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
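# XLNet appends its special tokens at the end: `A [SEP] [CLS]` for a single
# sequence and `A [SEP] B [SEP] [CLS]` for a pair ([CLS] comes last, unlike BERT).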
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple , snake_case__ : Optional[Any] = None ):
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [2]
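# Segment ids: 0 for the first sequence, 1 for the second, and a dedicated
# segment id 2 for the trailing [CLS] token.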
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : List[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
| 644 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> Optional[int]:
_UpperCamelCase : int = value
_UpperCamelCase : Node | None = None # Added in order to delete a node easier
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case = None ) -> List[Any]:
_UpperCamelCase : str = root
def __str__( self ) -> str:
return str(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if new_children is not None: # reset its kids
_UpperCamelCase : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_snake_case ): # If it is the right children
_UpperCamelCase : str = new_children
else:
_UpperCamelCase : Any = new_children
else:
_UpperCamelCase : Any = new_children
def _lowercase ( self , _snake_case ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self ) -> bool:
return self.root is None
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : List[Any] = Node(_snake_case ) # create a new Node
if self.empty(): # if Tree is empty
_UpperCamelCase : Optional[Any] = new_node # set its root
else: # Tree is not empty
_UpperCamelCase : int = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_UpperCamelCase : Union[str, Any] = new_node # We insert the new node in a leaf
break
else:
_UpperCamelCase : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
_UpperCamelCase : Any = new_node
break
else:
_UpperCamelCase : str = parent_node.right
_UpperCamelCase : Any = parent_node
def _lowercase ( self , *_snake_case ) -> None:
for value in values:
self.__insert(_snake_case )
def _lowercase ( self , _snake_case ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
_UpperCamelCase : List[str] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
_UpperCamelCase : Optional[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
if self.root is None:
return None
_UpperCamelCase : Dict = self.root
if not self.empty():
while node.right is not None:
_UpperCamelCase : Tuple = node.right
return node
def _lowercase ( self , _snake_case = None ) -> Node | None:
if node is None:
_UpperCamelCase : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
_UpperCamelCase : Optional[int] = self.root
while node.left is not None:
_UpperCamelCase : List[str] = node.left
return node
def _lowercase ( self , _snake_case ) -> None:
_UpperCamelCase : str = self.search(_snake_case ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_snake_case , _snake_case )
elif node.left is None: # Has only right children
self.__reassign_nodes(_snake_case , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_snake_case , node.left )
else:
_UpperCamelCase : List[str] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_UpperCamelCase : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self , _snake_case ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self , _snake_case=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self , _snake_case , _snake_case ) -> None:
if node:
self.inorder(_snake_case , node.left )
arr.append(node.value )
self.inorder(_snake_case , node.right )
def _lowercase ( self , _snake_case , _snake_case ) -> int:
_UpperCamelCase : list[int] = []
self.inorder(_snake_case , _snake_case ) # append all values to list using inorder traversal
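# An inorder traversal of a BST visits values in sorted order, so the k-th
# smallest element is arr[k - 1].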
return arr[k - 1]
def snake_case__ ( UpperCamelCase ) -> list[Node]:
_UpperCamelCase : int = []
if curr_node is not None:
_UpperCamelCase : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def snake_case__ ( ) -> None:
_UpperCamelCase : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_UpperCamelCase : Tuple = BinarySearchTree()
for i in testlist:
t.insert(UpperCamelCase )
# Prints all the elements of the list in order traversal
print(UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' ,t.get_max().value ) # type: ignore
print('''Min Value: ''' ,t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCamelCase )
print(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 0 |
'''simple docstring'''
from manim import *
class __UpperCAmelCase ( a_ ):
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ = [mem.copy() for i in range(6 )]
lowerCAmelCase_ = [mem.copy() for i in range(6 )]
lowerCAmelCase_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
lowerCAmelCase_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
lowerCAmelCase_ = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
lowerCAmelCase_ = Text('''CPU''' , font_size=24 )
lowerCAmelCase_ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
lowerCAmelCase_ = [mem.copy() for i in range(1 )]
lowerCAmelCase_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
lowerCAmelCase_ = Text('''GPU''' , font_size=24 )
lowerCAmelCase_ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.align_to(_snake_case , _snake_case )
gpu.set_x(gpu.get_x() - 1 )
self.add(_snake_case )
lowerCAmelCase_ = [mem.copy() for i in range(6 )]
lowerCAmelCase_ = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
lowerCAmelCase_ = Text('''Model''' , font_size=24 )
lowerCAmelCase_ = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.play(
Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) , )
lowerCAmelCase_ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
lowerCAmelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case , run_time=2.5 ) , Write(_snake_case ) , Write(_snake_case ) )
self.add(_snake_case )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for i, rect in enumerate(_snake_case ):
lowerCAmelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
cpu_target.move_to(_snake_case )
cpu_target.generate_target()
lowerCAmelCase_ = 0.46 / 4
lowerCAmelCase_ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_snake_case , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_snake_case , buff=0.0 )
cpu_targs.append(_snake_case )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_snake_case ) )
second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(*_snake_case )
self.wait()
| 274 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_UpperCAmelCase : List[str] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
_UpperCAmelCase : Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_UpperCAmelCase : int = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : Dict = 'whisper'
A__ : Tuple = ['past_key_values']
A__ : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _snake_case=51865 , _snake_case=80 , _snake_case=6 , _snake_case=4 , _snake_case=6 , _snake_case=4 , _snake_case=1536 , _snake_case=1536 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=50257 , _snake_case=True , _snake_case=True , _snake_case="gelu" , _snake_case=256 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=False , _snake_case=1500 , _snake_case=448 , _snake_case=50256 , _snake_case=50256 , _snake_case=50256 , _snake_case=None , _snake_case=[220, 50256] , _snake_case=False , _snake_case=256 , _snake_case=False , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=7 , **_snake_case , ) -> Any:
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Union[str, Any] = num_mel_bins
_UpperCamelCase : List[str] = d_model
_UpperCamelCase : str = encoder_layers
_UpperCamelCase : Optional[int] = encoder_attention_heads
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : Tuple = decoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : Optional[int] = encoder_ffn_dim
_UpperCamelCase : Any = dropout
_UpperCamelCase : Optional[Any] = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : int = activation_function
_UpperCamelCase : List[Any] = init_std
_UpperCamelCase : Optional[int] = encoder_layerdrop
_UpperCamelCase : str = decoder_layerdrop
_UpperCamelCase : List[str] = use_cache
_UpperCamelCase : Optional[Any] = encoder_layers
_UpperCamelCase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : List[str] = max_source_positions
_UpperCamelCase : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase : str = classifier_proj_size
_UpperCamelCase : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase : int = apply_spec_augment
_UpperCamelCase : str = mask_time_prob
_UpperCamelCase : int = mask_time_length
_UpperCamelCase : List[Any] = mask_time_min_masks
_UpperCamelCase : List[str] = mask_feature_prob
_UpperCamelCase : Optional[int] = mask_feature_length
_UpperCamelCase : Union[str, Any] = mask_feature_min_masks
_UpperCamelCase : Union[str, Any] = median_filter_width
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , suppress_tokens=_snake_case , begin_suppress_tokens=_snake_case , **_snake_case , )
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
_UpperCamelCase : Dict = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
_UpperCamelCase : Tuple = {0: '''batch'''}
else:
_UpperCamelCase : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' )
return common_inputs
def _lowercase ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , _snake_case = 22050 , _snake_case = 5.0 , _snake_case = 220 , ) -> Mapping[str, Any]:
_UpperCamelCase : Optional[int] = OrderedDict()
_UpperCamelCase : Tuple = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_snake_case , framework=_snake_case , sampling_rate=_snake_case , time_duration=_snake_case , frequency=_snake_case , )
_UpperCamelCase : int = encoder_inputs['''input_features'''].shape[2]
_UpperCamelCase : List[str] = encoder_sequence_length // 2 if self.use_past else seq_length
_UpperCamelCase : str = super().generate_dummy_inputs(
preprocessor.tokenizer , _snake_case , _snake_case , _snake_case , _snake_case )
_UpperCamelCase : Union[str, Any] = encoder_inputs.pop('''input_features''' )
_UpperCamelCase : Dict = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
_UpperCamelCase : List[str] = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def _lowercase ( self ) -> float:
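# Absolute tolerance used when validating the ONNX export against PyTorch outputs.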
return 1E-3
| 683 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : str = ["""model.decoder.embed_positions.weights"""]
def __snake_case ( _UpperCAmelCase : Optional[int]):
if "emb" in name:
UpperCamelCase = name.replace('''emb''', '''model.decoder.embed_tokens''')
if "transformer" in name:
UpperCamelCase = name.replace('''transformer''', '''model.decoder''')
if "cross_attention" in name:
UpperCamelCase = name.replace('''cross_attention''', '''encoder_attn''')
if "linear1" in name:
UpperCamelCase = name.replace('''linear1''', '''fc1''')
if "linear2" in name:
UpperCamelCase = name.replace('''linear2''', '''fc2''')
if "norm1" in name:
UpperCamelCase = name.replace('''norm1''', '''self_attn_layer_norm''')
if "norm_cross" in name:
UpperCamelCase = name.replace('''norm_cross''', '''encoder_attn_layer_norm''')
if "norm2" in name:
UpperCamelCase = name.replace('''norm2''', '''final_layer_norm''')
if "out_norm" in name:
UpperCamelCase = name.replace('''out_norm''', '''model.decoder.layer_norm''')
if "linears" in name:
UpperCamelCase = name.replace('''linears''', '''lm_heads''')
if "condition_provider.conditioners.description.output_proj" in name:
UpperCamelCase = name.replace('''condition_provider.conditioners.description.output_proj''', '''enc_to_dec_proj''')
return name
def __snake_case ( _UpperCAmelCase : Dict, _UpperCAmelCase : Optional[int]):
UpperCamelCase = list(state_dict.keys())
UpperCamelCase = {}
for key in keys:
UpperCamelCase = state_dict.pop(_UpperCAmelCase)
UpperCamelCase = rename_keys(_UpperCAmelCase)
if "in_proj_weight" in key:
# split fused qkv proj
UpperCamelCase = val[:hidden_size, :]
UpperCamelCase = val[hidden_size : 2 * hidden_size, :]
UpperCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCamelCase = val
else:
UpperCamelCase = val
return state_dict, enc_dec_proj_state_dict
def __snake_case ( _UpperCAmelCase : List[Any]):
if checkpoint == "small":
# default config values
UpperCamelCase = 1024
UpperCamelCase = 24
UpperCamelCase = 16
elif checkpoint == "medium":
UpperCamelCase = 1536
UpperCamelCase = 48
UpperCamelCase = 24
elif checkpoint == "large":
UpperCamelCase = 2048
UpperCamelCase = 48
UpperCamelCase = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.')
UpperCamelCase = MusicgenDecoderConfig(
hidden_size=_UpperCAmelCase, ffn_dim=hidden_size * 4, num_hidden_layers=_UpperCAmelCase, num_attention_heads=_UpperCAmelCase, )
return config
@torch.no_grad()
def __snake_case ( _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=None, _UpperCAmelCase : Tuple=None, _UpperCAmelCase : List[Any]="cpu"):
UpperCamelCase = MusicGen.get_pretrained(_UpperCAmelCase, device=_UpperCAmelCase)
UpperCamelCase = decoder_config_from_checkpoint(_UpperCAmelCase)
UpperCamelCase = fairseq_model.lm.state_dict()
UpperCamelCase = rename_state_dict(
_UpperCAmelCase, hidden_size=decoder_config.hidden_size)
UpperCamelCase = TaEncoderModel.from_pretrained('''t5-base''')
UpperCamelCase = EncodecModel.from_pretrained('''facebook/encodec_32khz''')
UpperCamelCase = MusicgenForCausalLM(_UpperCAmelCase).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCamelCase = decoder.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase)
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''')) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCAmelCase)
if len(_UpperCAmelCase) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}')
if len(_UpperCAmelCase) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}')
# init the composite model
UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase, audio_encoder=_UpperCAmelCase, decoder=_UpperCAmelCase)
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase)
# check we can do a forward pass
UpperCamelCase = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
UpperCamelCase = input_ids.reshape(2 * 4, -1)
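# Sanity-check a forward pass: 2 sequences x 4 codebooks are flattened into a
# batch of 8 length-1 decoder inputs, so logits should come back as (8, 1, 2048).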
with torch.no_grad():
UpperCamelCase = model(input_ids=_UpperCAmelCase, decoder_input_ids=_UpperCAmelCase).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''')
# now construct the processor
UpperCamelCase = AutoTokenizer.from_pretrained('''t5-base''')
UpperCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''', padding_side='''left''')
UpperCamelCase = MusicgenProcessor(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase)
# set the appropriate bos/pad token ids
UpperCamelCase = 2048
UpperCamelCase = 2048
# set other default generation config params
UpperCamelCase = int(30 * audio_encoder.config.frame_rate)
UpperCamelCase = True
UpperCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(_UpperCAmelCase).mkdir(exist_ok=_UpperCAmelCase)
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}')
model.save_pretrained(_UpperCAmelCase)
processor.save_pretrained(_UpperCAmelCase)
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}')
model.push_to_hub(_UpperCAmelCase)
processor.push_to_hub(_UpperCAmelCase)
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
snake_case_ : Dict = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 212 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : Tuple = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Union[str, Any] = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : int = """roberta"""
elif args.model_type == "gpt2":
_UpperCAmelCase : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[int] = """transformer"""
_UpperCAmelCase : Tuple = model.state_dict()
_UpperCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Optional[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase : Dict = 0
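# Initialize the 6-layer student from a fixed subset of teacher layers
# ([0, 2, 4, 7, 9, 11], the DistilBERT-style selection).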
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase : str = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : Any = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Dict = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : int = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : List[str] = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : Any = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 683 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
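# Symbols are resolved lazily on first attribute access, so optional backends
# (torch/TF/Flax) are only imported when they are actually used.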
| 370 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self , _snake_case , _snake_case ) -> Union[str, Any]:
_UpperCamelCase : Optional[int] = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : int = None
_UpperCamelCase : int = 20
_UpperCamelCase : Any = self._get_uniform_logits(batch_size=2 , length=_snake_case )
# tweak scores to not be uniform anymore
_UpperCamelCase : Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : Dict = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Any = jax.nn.softmax(_snake_case , axis=-1 )
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
_UpperCamelCase : str = jax.nn.softmax(temp_dist_warper_smoother(_snake_case , scores.copy() , cur_len=_snake_case ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
_UpperCamelCase : List[Any] = None
_UpperCamelCase : Optional[int] = 10
_UpperCamelCase : Any = 2
# create ramp distribution
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : Optional[int] = 5
_UpperCamelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Union[str, Any] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : Optional[Any] = top_k_warp_safety_check(_snake_case , _snake_case , cur_len=_snake_case )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> Optional[int]:
_UpperCamelCase : Any = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_UpperCamelCase : List[str] = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : Dict = np.exp(top_p_warp(_snake_case , _snake_case , cur_len=_snake_case ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-3 ) )
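        # worked example, first row: sorted descending the probs are
        # [0.5, 0.3, 0.1, 0.1] with cumulative sums [0.5, 0.8, 0.9, 1.0];
        # the smallest prefix reaching top_p=0.8 is {0.5, 0.3}, so only the
        # tokens with probs 0.5 and 0.3 survive, matching the expected array above.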
# check edge cases with negative and extreme logits
_UpperCamelCase : Optional[int] = np.broadcast_to(np.arange(_snake_case )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Dict:
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : Optional[int] = 4
_UpperCamelCase : int = 0
_UpperCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
# check that min length is applied at length 5
_UpperCamelCase : Any = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : int = 5
_UpperCamelCase : List[Any] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Optional[int] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = 15
_UpperCamelCase : Optional[int] = min_dist_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Union[str, Any] = 4
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : str = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : str = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : List[str] = 3
_UpperCamelCase : Tuple = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 20
_UpperCamelCase : Tuple = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : str = 5
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Dict = 4
_UpperCamelCase : Dict = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : int = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : Optional[int] = 3
_UpperCamelCase : Any = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[Any] = logits_processor(_snake_case , _snake_case , cur_len=_snake_case )
self.assertFalse(jnp.isinf(_snake_case ).any() )
def _lowercase ( self ) -> str:
_UpperCamelCase : Dict = 4
_UpperCamelCase : Optional[Any] = 10
_UpperCamelCase : Dict = 15
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : Optional[Any] = 1
_UpperCamelCase : List[Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Any = input_ids.copy()
_UpperCamelCase : int = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : List[str] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : List[str] = 10
# no processor list
_UpperCamelCase : Dict = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Optional[int] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
# with processor list
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : Optional[Any] = processor(_snake_case , _snake_case , cur_len=_snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Tuple = 4
_UpperCamelCase : int = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = 2
_UpperCamelCase : Tuple = 1
_UpperCamelCase : Optional[int] = 15
# dummy input_ids and scores
_UpperCamelCase : Tuple = ids_tensor((batch_size, sequence_length) , _snake_case )
_UpperCamelCase : Optional[Any] = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(_snake_case , _snake_case )
_UpperCamelCase : Optional[int] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Dict = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : int = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_snake_case )
_UpperCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_snake_case )
_UpperCamelCase : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_snake_case , eos_token_id=_snake_case )
_UpperCamelCase : Union[str, Any] = 10
# no processor list
def run_no_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : List[Any] = temp_dist_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Tuple = top_k_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = top_p_warp(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = min_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : Union[str, Any] = bos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
_UpperCamelCase : str = eos_dist_proc(_snake_case , _snake_case , cur_len=_snake_case )
return scores
# with processor list
def run_processor_list(_snake_case , _snake_case , _snake_case ):
_UpperCamelCase : Optional[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[str] = processor(_snake_case , _snake_case , cur_len=_snake_case )
return scores
_UpperCamelCase : Dict = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jax.jit(_snake_case )
_UpperCamelCase : Optional[int] = jitted_run_no_processor_list(_snake_case , _snake_case , _snake_case )
_UpperCamelCase : Any = jitted_run_processor_list(_snake_case , _snake_case , _snake_case )
# scores should be equal
self.assertTrue(jnp.allclose(_snake_case , _snake_case , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
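# --- illustrative sketch (not part of the test suite) ---
# Nucleus (top-p) filtering keeps the smallest set of tokens whose total
# probability reaches `top_p`. The stand-alone snippet below re-derives the
# expected arrays used in the top-p test above with plain NumPy; it is an
# independent sketch, not the transformers implementation.
if __name__ == "__main__":
    probs = np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])
    top_p = 0.8
    for row in probs:
        kept, total = [], 0.0
        for idx in np.argsort(row)[::-1]:  # most probable token first
            kept.append(idx)
            total += row[idx]
            if total >= top_p:  # smallest prefix whose mass reaches top_p
                break
        print(np.where(np.isin(np.arange(row.size), kept), row, 0.0))
        # prints [0.3 0. 0. 0.5] for the first row, [0. 0.3 0.3 0.25] for the second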
| 683 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCAmelCase_ : int = ["""text""", """image""", """audio"""]
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] ) -> List[Any]:
"""simple docstring"""
a_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(__A , __A ):
inputs.append(create_inputs(__A ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def SCREAMING_SNAKE_CASE_ ( __A : Dict ) -> Any:
"""simple docstring"""
a_ : int = []
for output in outputs:
if isinstance(__A , (str, AgentText) ):
output_types.append('text' )
elif isinstance(__A , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(__A , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
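# Illustrative note (not executed): `output_types` reduces concrete return
# values to the coarse modality labels used by the checks below, e.g.
# output_types(["hello", torch.ones(16)]) -> ["text", "audio"], assuming
# torch is available so the tensor branch is reachable.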
@is_tool_test
class SCREAMING_SNAKE_CASE__ :
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
a_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , _snake_case ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
a_ : Optional[Any] = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
a_ : str = create_inputs(self.tool.inputs )
a_ : Optional[int] = self.tool(*_snake_case )
# There is a single output
if len(self.tool.outputs ) == 1:
a_ : Optional[Any] = [outputs]
self.assertListEqual(output_types(_snake_case ) , self.tool.outputs )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
a_ : Union[str, Any] = create_inputs(self.tool.inputs )
a_ : Any = self.tool(*_snake_case )
if not isinstance(_snake_case , _snake_case ):
a_ : Any = [outputs]
self.assertEqual(len(_snake_case ) , len(self.tool.outputs ) )
for output, output_type in zip(_snake_case , self.tool.outputs ):
a_ : Union[str, Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
a_ : Optional[int] = create_inputs(self.tool.inputs )
a_ : List[str] = []
for _input, input_type in zip(_snake_case , self.tool.inputs ):
if isinstance(_snake_case , _snake_case ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
a_ : List[str] = self.tool(*_snake_case )
if not isinstance(_snake_case , _snake_case ):
a_ : List[Any] = [outputs]
self.assertEqual(len(_snake_case ) , len(self.tool.outputs ) )
| 570 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Dict:
inspect_dataset(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> int:
inspect_metric(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : List[str] = path + '''.py'''
assert script_name in os.listdir(UpperCamelCase )
assert "__pycache__" not in os.listdir(UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
_UpperCamelCase : List[str] = get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
with pytest.raises(UpperCamelCase ):
get_dataset_config_info(UpperCamelCase ,config_name=UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : int = get_dataset_config_names(UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
_UpperCamelCase : Dict = get_dataset_infos(UpperCamelCase )
assert list(infos.keys() ) == expected_configs
_UpperCamelCase : Dict = expected_configs[0]
assert expected_config in infos
_UpperCamelCase : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : List[Any] = get_dataset_infos(UpperCamelCase )
assert expected_config in infos
_UpperCamelCase : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
with pytest.raises(UpperCamelCase ):
get_dataset_split_names(UpperCamelCase ,config_name=UpperCamelCase )
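# Illustrative usage (not a test): the inspection helpers exercised above can
# also be called directly; this assumes network access and a `datasets`
# release that still ships them.
#
#   from datasets import get_dataset_config_names, get_dataset_split_names
#   get_dataset_config_names("squad")                # -> ["plain_text"]
#   get_dataset_split_names("squad", "plain_text")   # -> ["train", "validation"]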
| 683 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE__ : List[Any] = Lock()
def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
SCREAMING_SNAKE_CASE_ = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(SCREAMING_SNAKE_CASE )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
SCREAMING_SNAKE_CASE_ = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# after all swaps are performed, send the values back to main
result_pipe[1].send(SCREAMING_SNAKE_CASE )
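# Why ten iterations suffice here: odd-even transposition sort is guaranteed
# to sort n elements within n alternating compare-exchange phases, and main()
# below always sorts a ten-element list; a general version would loop
# len(arr) times instead of the hard-coded 10.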
def lowercase ( SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
SCREAMING_SNAKE_CASE_ = Pipe()
SCREAMING_SNAKE_CASE_ = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
SCREAMING_SNAKE_CASE_ = temp_rs
SCREAMING_SNAKE_CASE_ = temp_rr
for i in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
SCREAMING_SNAKE_CASE_ = Pipe()
SCREAMING_SNAKE_CASE_ = Pipe()
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
SCREAMING_SNAKE_CASE_ = temp_rs
SCREAMING_SNAKE_CASE_ = temp_rr
process_array_.append(
Process(
target=SCREAMING_SNAKE_CASE , args=(
len(SCREAMING_SNAKE_CASE ) - 1,
arr[len(SCREAMING_SNAKE_CASE ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(SCREAMING_SNAKE_CASE ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(SCREAMING_SNAKE_CASE ) ):
SCREAMING_SNAKE_CASE_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = list(range(10 , 0 , -1 ) )
print('Initial List' )
print(*SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = odd_even_transposition(SCREAMING_SNAKE_CASE )
print('Sorted List\n' )
print(*SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
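# --- illustrative sketch (not part of the original script) ---
# A single-process restatement of odd-even transposition sort, useful as a
# baseline when sanity-checking the multiprocessing version above; the names
# below are invented for illustration.
def odd_even_transposition_sequential(values: list) -> list:
    values = list(values)
    n = len(values)
    for phase in range(n):  # n alternating phases are enough to sort n items
        for i in range(phase % 2, n - 1, 2):  # even pairs, then odd pairs
            if values[i] > values[i + 1]:
                values[i], values[i + 1] = values[i + 1], values[i]
    return values
# e.g. odd_even_transposition_sequential([3, 1, 2]) -> [1, 2, 3]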
| 205 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_UpperCamelCase : Any = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCamelCase : Optional[int] = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
_UpperCamelCase : int = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
_UpperCamelCase : int = DDPMScheduler()
_UpperCamelCase : Optional[int] = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case , steps=4 )
_UpperCamelCase : Union[str, Any] = output.audios[0]
_UpperCamelCase : Union[str, Any] = output.images[0]
_UpperCamelCase : str = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : int = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
_UpperCamelCase : int = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
_UpperCamelCase : List[str] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : int = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : str = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
_UpperCamelCase : Dict = DDIMScheduler()
_UpperCamelCase : str = self.dummy_vqvae_and_unet
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
_UpperCamelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Tuple = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=10 )
_UpperCamelCase : List[str] = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
_UpperCamelCase : Any = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Tuple = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
_UpperCamelCase : Any = self.dummy_unet_condition
_UpperCamelCase : List[Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
_UpperCamelCase : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
_UpperCamelCase : int = torch.rand((1, 1, 10) )
_UpperCamelCase : Optional[Any] = pipe(generator=_snake_case , encoding=_snake_case )
_UpperCamelCase : Dict = output.images[0]
_UpperCamelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Any = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
_UpperCamelCase : Optional[int] = torch_device
_UpperCamelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
_UpperCamelCase : str = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_UpperCamelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(42 )
_UpperCamelCase : Optional[int] = pipe(generator=_snake_case )
_UpperCamelCase : List[Any] = output.audios[0]
_UpperCamelCase : List[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
_UpperCamelCase : Union[str, Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
_UpperCamelCase : Union[str, Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 683 | 0 |
class __UpperCamelCase :
'''simple docstring'''
def __init__( self , lowerCamelCase__ ):
UpperCAmelCase__: Optional[int] = set_counts
UpperCAmelCase__: Optional[int] = max(_snake_case )
UpperCAmelCase__: Union[str, Any] = len(_snake_case )
UpperCAmelCase__: Optional[int] = [1] * num_sets
UpperCAmelCase__: List[Any] = list(range(_snake_case ) )
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Any = self.get_parent(_snake_case )
UpperCAmelCase__: int = self.get_parent(_snake_case )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
UpperCAmelCase__: Union[str, Any] = 0
UpperCAmelCase__: str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
UpperCAmelCase__: Optional[int] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
UpperCAmelCase__: Dict = 0
UpperCAmelCase__: List[str] = src_parent
UpperCAmelCase__: Optional[int] = self.set_counts[src_parent]
UpperCAmelCase__: Any = max(self.max_set , _snake_case )
return True
def _UpperCAmelCase ( self , lowerCamelCase__ ):
if self.parents[disj_set] == disj_set:
return disj_set
UpperCAmelCase__: List[str] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
| 113 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 683 | 0 |
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = ''''''
for i in table:
res += inp[i - 1]
return res
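# Worked example of apply_table: tables hold 1-based positions, so
# apply_table("1011", [2, 4, 3, 1]) picks inp[1], inp[3], inp[2], inp[0]
# and returns "0111".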
def _a ( UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
return data[1:] + data[0]
def _a ( UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : Dict = ''''''
for i in range(len(UpperCAmelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : Any = int('''0b''' + data[0] + data[-1] , 2 )
lowerCamelCase__ : Any = int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
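# Worked example of apply_sbox: for 4-bit data "0110" the row index is
# int("0" + "0", 2) = 0 (outer bits) and the column index int("11", 2) = 3
# (inner bits); with the S-box [[1, 0, 3, 2], ...] defined below, s[0][3] = 2
# and "10" is returned. Entries 0 and 1 yield a single character, which is
# why the round function left-pads each S-box output to two bits.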
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ : Tuple = message[:4]
lowerCamelCase__ : List[Any] = message[4:]
lowerCamelCase__ : List[Any] = apply_table(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : str = xor(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Any = apply_sbox(UpperCAmelCase , temp[:4] ) # noqa: E741
lowerCamelCase__ : int = apply_sbox(UpperCAmelCase , temp[4:] )
lowerCamelCase__ : str = '''0''' * (2 - len(UpperCAmelCase )) + l # noqa: E741
lowerCamelCase__ : Dict = '''0''' * (2 - len(UpperCAmelCase )) + r
lowerCamelCase__ : Dict = apply_table(l + r , UpperCAmelCase )
lowerCamelCase__ : List[str] = xor(UpperCAmelCase , UpperCAmelCase )
return temp + right
if __name__ == "__main__":
_A : Union[str, Any] = input('Enter 10 bit key: ')
_A : Any = input('Enter 8 bit message: ')
_A : Tuple = [6, 3, 7, 4, 8, 5, 10, 9]
_A : List[str] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_A : Dict = [2, 4, 3, 1]
_A : Optional[int] = [2, 6, 3, 1, 4, 8, 5, 7]
_A : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
_A : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
_A : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_A : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_A : List[str] = apply_table(key, paa_table)
_A : Optional[Any] = temp[:5]
_A : Union[str, Any] = temp[5:]
_A : Dict = left_shift(left)
_A : List[Any] = left_shift(right)
_A : str = apply_table(left + right, pa_table)
_A : Tuple = left_shift(left)
_A : List[str] = left_shift(right)
_A : Any = left_shift(left)
_A : Union[str, Any] = left_shift(right)
_A : Optional[Any] = apply_table(left + right, pa_table)
# encryption
_A : List[Any] = apply_table(message, IP)
_A : List[Any] = function(expansion, sa, sa, keya, temp)
_A : str = temp[4:] + temp[:4]
_A : int = function(expansion, sa, sa, keya, temp)
_A : Tuple = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
_A : Dict = apply_table(CT, IP)
_A : Optional[Any] = function(expansion, sa, sa, keya, temp)
_A : Optional[Any] = temp[4:] + temp[:4]
_A : List[Any] = function(expansion, sa, sa, keya, temp)
_A : Union[str, Any] = apply_table(temp, IP_inv)
print('Plain text after decrypting is:', PT)
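# Note (textbook S-DES): the two Feistel rounds use distinct 8-bit subkeys K1
# and K2, both derived above from the 10-bit key via P10, left shifts and P8;
# encryption applies K1 then K2, and decryption applies them in reverse order,
# which is why the decryption block mirrors the encryption block.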
| 315 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
_UpperCAmelCase : Optional[int] = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
_UpperCAmelCase : Any = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : List[Any] = VOCAB_FILES_NAMES
A__ : Dict = PRETRAINED_VOCAB_FILES_MAP
A__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Tuple = DistilBertTokenizer
def __init__( self , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case=True , _snake_case=None , **_snake_case , ) -> int:
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , tokenize_chinese_chars=_snake_case , strip_accents=_snake_case , **_snake_case , )
_UpperCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _snake_case ) != tokenize_chinese_chars
):
_UpperCamelCase : int = getattr(_snake_case , normalizer_state.pop('''type''' ) )
_UpperCamelCase : Optional[int] = do_lower_case
_UpperCamelCase : Dict = strip_accents
_UpperCamelCase : List[Any] = tokenize_chinese_chars
_UpperCamelCase : Tuple = normalizer_class(**_snake_case )
_UpperCamelCase : Dict = do_lower_case
def _lowercase ( self , _snake_case , _snake_case=None ) -> Optional[int]:
_UpperCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , _snake_case , _snake_case = None ) -> List[int]:
_UpperCamelCase : Union[str, Any] = [self.sep_token_id]
_UpperCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
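        # e.g. for a first sequence of length 3 and a second of length 2 this
        # yields [0, 0, 0, 0, 0, 1, 1, 1]: zeros cover `[CLS] A [SEP]`, ones
        # cover `B [SEP]`.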
def _lowercase ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
_UpperCamelCase : Optional[Any] = self._tokenizer.model.save(_snake_case , name=_snake_case )
return tuple(_snake_case )
| 683 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ (a_ , unittest.TestCase ):
snake_case =LxmertTokenizer
snake_case =LxmertTokenizerFast
snake_case =True
snake_case =True
def __UpperCamelCase ( self) -> Dict:
super().setUp()
a__ =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a__ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def __UpperCamelCase ( self , lowercase_) -> Optional[int]:
a__ ='''UNwant\u00E9d,running'''
a__ ='''unwanted, running'''
return input_text, output_text
def __UpperCamelCase ( self) -> int:
a__ =self.tokenizer_class(self.vocab_file)
a__ =tokenizer.tokenize('UNwant\u00E9d,running')
self.assertListEqual(_snake_case , ['un', '##want', '##ed', ',', 'runn', '##ing'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , [7, 4, 5, 10, 8, 9])
def __UpperCamelCase ( self) -> List[str]:
if not self.test_rust_tokenizer:
return
a__ =self.get_tokenizer()
a__ =self.get_rust_tokenizer()
a__ ='''I was born in 92000, and this is falsé.'''
a__ =tokenizer.tokenize(_snake_case)
a__ =rust_tokenizer.tokenize(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
a__ =tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
a__ =rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
a__ =self.get_rust_tokenizer()
a__ =tokenizer.encode(_snake_case)
a__ =rust_tokenizer.encode(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
| 20 |
'''simple docstring'''
def snake_case__ ( UpperCamelCase ) -> list:
_UpperCamelCase : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
_UpperCamelCase : List[str] = True
for i in range(0 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Dict = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : int = False
for i in range(1 ,len(UpperCamelCase ) - 1 ,2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
_UpperCamelCase : Optional[int] = False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase : Union[str, Any] = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
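# --- illustrative sketch (independent of the script above) ---
# A compact restatement of the same odd-even (brick) sort idea with the swap
# written out explicitly; all names below are invented for illustration.
def brick_sort(items: list) -> list:
    items = list(items)
    swapped = True
    while swapped:  # repeat until a full even+odd sweep makes no swaps
        swapped = False
        for start in (0, 1):  # even-indexed pairs, then odd-indexed pairs
            for i in range(start, len(items) - 1, 2):
                if items[i] > items[i + 1]:
                    items[i], items[i + 1] = items[i + 1], items[i]
                    swapped = True
    return items
# e.g. brick_sort([5, 1, 4, 2, 8]) -> [1, 2, 4, 5, 8]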
| 683 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict , __A : Any=2 , __A : List[str]=5_6 , __A : str=True , __A : Optional[int]=True , __A : Optional[int]=True , __A : Dict=True , __A : List[str]=9_9 , __A : Optional[int]=3_2 , __A : Any=2 , __A : Optional[Any]=2 , __A : Optional[int]=7 , __A : List[str]="gelu_new" , __A : Tuple=0.1 , __A : Dict=0.1 , __A : Tuple=5_1_2 , __A : List[str]=1_6 , __A : List[str]=2 , __A : Union[str, Any]=0.02 , __A : Optional[Any]=4 , __A : List[str]="block_sparse" , __A : Optional[int]=True , __A : Optional[Any]=False , __A : Any=2 , __A : str=3 , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_attention_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_choices
__UpperCamelCase = rescale_embeddings
__UpperCamelCase = attention_type
__UpperCamelCase = use_bias
__UpperCamelCase = block_size
__UpperCamelCase = num_random_blocks
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_attention_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
if self.use_token_type_ids:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase = config_and_inputs
__UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class snake_case ( a_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =(
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Optional[Any] =False
SCREAMING_SNAKE_CASE_ : Union[str, Any] =False
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : List[Any] ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : Tuple ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : str ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : str ):
super().test_hidden_states_output()
@slow
def _lowerCamelCase ( self : Any ):
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(_snake_case )
def _lowerCamelCase ( self : Optional[int] ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(_snake_case , _snake_case )
__UpperCamelCase = model_class(_snake_case )
@jax.jit
def model_jitted(__A : List[str] , __A : Union[str, Any]=None , **__A : Any ):
return model(input_ids=_snake_case , attention_mask=_snake_case , **_snake_case )
with self.subTest('JIT Enabled' ):
__UpperCamelCase = model_jitted(**_snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase = model_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self : Optional[Any] , __A : Union[str, Any] , __A : Dict , __A : Union[str, Any] , __A : List[Any]=1e-5 , __A : int="outputs" , __A : Optional[Any]=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version
        # an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
| 399 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = checkpoint
_UpperCamelCase : int = {}
_UpperCamelCase : int = vae_state_dict['''encoder.conv_in.weight''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_in.bias''']
_UpperCamelCase : Tuple = vae_state_dict['''encoder.conv_out.weight''']
_UpperCamelCase : Any = vae_state_dict['''encoder.conv_out.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''encoder.norm_out.weight''']
_UpperCamelCase : str = vae_state_dict['''encoder.norm_out.bias''']
_UpperCamelCase : str = vae_state_dict['''decoder.conv_in.weight''']
_UpperCamelCase : List[Any] = vae_state_dict['''decoder.conv_in.bias''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.weight''']
_UpperCamelCase : List[str] = vae_state_dict['''decoder.conv_out.bias''']
_UpperCamelCase : int = vae_state_dict['''decoder.norm_out.weight''']
_UpperCamelCase : Dict = vae_state_dict['''decoder.norm_out.bias''']
_UpperCamelCase : Optional[int] = vae_state_dict['''quant_conv.weight''']
_UpperCamelCase : int = vae_state_dict['''quant_conv.bias''']
_UpperCamelCase : List[Any] = vae_state_dict['''post_quant_conv.weight''']
_UpperCamelCase : Optional[int] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
_UpperCamelCase : Optional[int] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
_UpperCamelCase : Tuple = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_UpperCamelCase : Any = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
_UpperCamelCase : int = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(UpperCamelCase )
}
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
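# Illustrative effect of the renaming above (based on the meta_path
# replacements): a CompVis key such as
#   encoder.down.0.block.1.norm1.weight
# becomes the diffusers key
#   encoder.down_blocks.0.resnets.1.norm1.weight
# with analogous rewrites for the mid-block resnets and attentions.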
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,) -> List[str]:
# Only support V1
_UpperCamelCase : Tuple = requests.get(
'''https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
_UpperCamelCase : List[Any] = io.BytesIO(r.content )
_UpperCamelCase : Optional[int] = OmegaConf.load(UpperCamelCase )
_UpperCamelCase : str = 5_12
_UpperCamelCase : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
_UpperCamelCase : str = {}
with safe_open(UpperCamelCase ,framework='''pt''' ,device='''cpu''' ) as f:
for key in f.keys():
_UpperCamelCase : Union[str, Any] = f.get_tensor(UpperCamelCase )
else:
_UpperCamelCase : str = torch.load(UpperCamelCase ,map_location=UpperCamelCase )['''state_dict''']
# Convert the VAE model.
_UpperCamelCase : Dict = create_vae_diffusers_config(UpperCamelCase ,image_size=UpperCamelCase )
_UpperCamelCase : str = custom_convert_ldm_vae_checkpoint(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Dict = AutoencoderKL(**UpperCamelCase )
vae.load_state_dict(UpperCamelCase )
vae.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 683 | 0 |
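The converter above works by renaming state-dict keys from the LDM layout to the diffusers layout through `{"old": ..., "new": ...}` replacement rules (e.g. `down.{i}.block` becomes `down_blocks.{i}.resnets`). Below is a minimal, self-contained sketch of that renaming idea; the keys are illustrative, not the converter's complete rule set.

```python
# Minimal sketch of LDM -> diffusers key renaming; illustrative keys only.
state_dict = {
    "encoder.down.0.block.0.conv1.weight": "w0",
    "encoder.down.1.block.0.conv1.weight": "w1",
}

renamed = {}
for key, value in state_dict.items():
    new_key = key
    for i in range(2):  # two down blocks in this toy example
        # Mirrors the {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        # meta_path dictionaries used by the converter above.
        new_key = new_key.replace(f"down.{i}.block", f"down_blocks.{i}.resnets")
    renamed[new_key] = value

print(sorted(renamed))
# ['encoder.down_blocks.0.resnets.0.conv1.weight',
#  'encoder.down_blocks.1.resnets.0.conv1.weight']
```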
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_( a_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = DanceDiffusionPipeline
__lowercase : Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__lowercase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
__lowercase : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__lowercase : List[Any] = False
__lowercase : Dict = False
def UpperCAmelCase_ ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_snake_case ,use_timestep_embedding=_snake_case ,time_embedding_type="""fourier""" ,mid_block_type="""UNetMidBlock1D""" ,down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") ,up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") ,)
lowerCAmelCase__ : List[Any] = IPNDMScheduler()
lowerCAmelCase__ : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ) -> List[str]:
if str(_snake_case ).startswith("""mps""" ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(_snake_case )
else:
lowerCAmelCase__ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase__ : Union[str, Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : Optional[int] = DanceDiffusionPipeline(**_snake_case )
lowerCAmelCase__ : Any = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
lowerCAmelCase__ : Dict = pipe(**_snake_case )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : str = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCAmelCase_ ( self ) -> Any:
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCAmelCase_ ( self ) -> Any:
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase_ ( self ) -> List[str]:
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase_ ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : str = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
lowerCAmelCase__ : Any = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Dict = pipe(generator=_snake_case ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Any = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : List[str] = torch_device
lowerCAmelCase__ : int = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" ,torch_dtype=torch.floataa )
lowerCAmelCase__ : List[str] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowerCAmelCase__ : Dict = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = pipe(generator=_snake_case ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : Optional[Any] = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 565 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_UpperCamelCase : List[str] = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
if images is not None:
_UpperCamelCase : str = self.image_processor(_snake_case , return_tensors=_snake_case , **_snake_case )
if text is not None and images is not None:
_UpperCamelCase : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) , tensor_type=_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 683 | 0 |
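The processor above is essentially a router: text goes to the tokenizer, images go to the image processor, and `pixel_values` is merged into the text encoding when both are given. A toy rendering of that routing follows, with stand-in callables rather than the real transformers classes.

```python
# Toy sketch of the text/image routing in the processor above.
# fake_tokenizer / fake_image_processor are stand-ins, not real APIs.
def fake_tokenizer(texts):
    return {"input_ids": [list(range(len(t.split()))) for t in texts]}

def fake_image_processor(images):
    return {"pixel_values": [[0.0] * 4 for _ in images]}

def process(text=None, images=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = fake_tokenizer(text) if text is not None else {}
    if images is not None:
        # Merge pixel_values into the text encoding, as __call__ does above.
        encoding.update(fake_image_processor(images))
    return encoding

print(sorted(process(text=["a cat"], images=["img.png"])))
# ['input_ids', 'pixel_values']
```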
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
__lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
__lowerCAmelCase : List[Any] = {
"""abeja/gpt-neox-japanese-2.7b""": 20_48,
}
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
with open(lowerCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = collections.OrderedDict()
lowerCAmelCase__ = collections.OrderedDict()
lowerCAmelCase__ = collections.OrderedDict()
with open(lowerCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = [[t.rstrip("""\n""" )] if (t == ''',''' or ''',''' not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(lowerCamelCase__ ):
lowerCAmelCase__ = b
lowerCAmelCase__ = idx
for wd in b:
lowerCAmelCase__ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class a_ ( a_ ):
UpperCamelCase_ : Optional[int] = VOCAB_FILES_NAMES
UpperCamelCase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : List[str] = ['input_ids', 'attention_mask']
def __init__( self : str , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : List[Any]="<|endoftext|>" , snake_case__ : Optional[Any]="<|endoftext|>" , snake_case__ : Optional[int]="<|startoftext|>" , snake_case__ : List[str]="<|endoftext|>" , snake_case__ : Dict=False , **snake_case__ : List[Any] , ):
super().__init__(
unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
if not os.path.isfile(_snake_case ):
raise ValueError(
F"""Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained"""
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(_snake_case ):
raise ValueError(
F"""Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google"""
""" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
lowerCAmelCase__ = do_clean_text
lowerCAmelCase__ = load_vocab_and_emoji(_snake_case , _snake_case )
lowerCAmelCase__ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
# self.vocab supports character variants unique to Japanese, and therefore has a large vocabulary
return len(self.raw_vocab )
def _SCREAMING_SNAKE_CASE ( self : int ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Tuple ):
return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Dict ):
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Union[str, Any] ):
return self.subword_tokenizer.convert_id_to_token(_snake_case )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Tuple ):
lowerCAmelCase__ = ''''''.join(_snake_case ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Dict ):
lowerCAmelCase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
if len(_snake_case ) > self.model_max_length:
lowerCAmelCase__ = input_ids[-self.model_max_length :]
return input_ids
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Tuple , snake_case__ : Optional[int] = None ):
lowerCAmelCase__ = 0
if os.path.isdir(_snake_case ):
lowerCAmelCase__ = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
lowerCAmelCase__ = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
)
lowerCAmelCase__ = (
(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
)
with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
lowerCAmelCase__ = token_index
writer.write(""",""".join(_snake_case ) + """\n""" )
index += 1
with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , _snake_case )
return vocab_file, emoji_file
class a_ ( a_ ):
def __init__( self : Dict , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ):
lowerCAmelCase__ = vocab # same as swe
lowerCAmelCase__ = ids_to_tokens # same as bpe
lowerCAmelCase__ = emoji
lowerCAmelCase__ = np.max([len(_snake_case ) for w in self.vocab.keys()] )
lowerCAmelCase__ = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
lowerCAmelCase__ = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
lowerCAmelCase__ = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
lowerCAmelCase__ = re.compile(
R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
lowerCAmelCase__ = re.compile(
R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
lowerCAmelCase__ = re.compile(
R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
lowerCAmelCase__ = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
lowerCAmelCase__ = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
lowerCAmelCase__ = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self : int ):
return len(self.ids_to_tokens )
def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ):
lowerCAmelCase__ = self.content_repattera.sub("""<URL>""" , _snake_case )
lowerCAmelCase__ = self.content_repattera.sub("""<EMAIL>""" , _snake_case )
lowerCAmelCase__ = self.content_repattera.sub("""<TEL>""" , _snake_case )
lowerCAmelCase__ = self.content_repattera.sub("""<DATE>""" , _snake_case )
lowerCAmelCase__ = self.content_repattera.sub("""<DATE>""" , _snake_case )
lowerCAmelCase__ = self.content_repattera.sub("""<PRICE>""" , _snake_case )
lowerCAmelCase__ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase__ = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[str]=False ):
lowerCAmelCase__ = text.replace(""" """ , """<SP>""" )
lowerCAmelCase__ = text.replace(""" """ , """<SP>""" )
lowerCAmelCase__ = text.replace("""\r\n""" , """<BR>""" )
lowerCAmelCase__ = text.replace("""\n""" , """<BR>""" )
lowerCAmelCase__ = text.replace("""\r""" , """<BR>""" )
lowerCAmelCase__ = text.replace("""\t""" , """<TAB>""" )
lowerCAmelCase__ = text.replace("""—""" , """ー""" )
lowerCAmelCase__ = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCAmelCase__ = text.replace(_snake_case , _snake_case )
if clean:
lowerCAmelCase__ = self.clean_text(_snake_case )
def check_simbol(snake_case__ : List[str] ):
lowerCAmelCase__ = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 2:
lowerCAmelCase__ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(snake_case__ : Tuple ):
lowerCAmelCase__ = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 3:
lowerCAmelCase__ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
lowerCAmelCase__ = 0
lowerCAmelCase__ = []
while pos < len(_snake_case ):
lowerCAmelCase__ = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
lowerCAmelCase__ = [] # (token_id, token, pos)
for e in range(_snake_case , _snake_case , -1 ):
lowerCAmelCase__ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_snake_case ) > 2:
lowerCAmelCase__ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_snake_case ) > 0:
# the smallest token_id is adopted
lowerCAmelCase__ = sorted(_snake_case , key=lambda snake_case__ : x[0] )[0]
result.append(_snake_case )
lowerCAmelCase__ = e
else:
lowerCAmelCase__ = pos + 1
lowerCAmelCase__ = text[pos:end]
if check_simbol(_snake_case ):
result.append("""<KIGOU>""" )
elif checkuae(_snake_case ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
lowerCAmelCase__ = end
return result
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int="\n" ):
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode("""utf-8""" , errors="""replace""" ) )
lowerCAmelCase__ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode("""utf-8""" , errors="""replace""" ) )
lowerCAmelCase__ = ''''''.join(_snake_case )
return text
| 644 |
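The heart of the subword tokenizer above is a longest-window scan: at each position it tries candidate substrings from the longest down, collects vocabulary hits, prefers the smallest token id among them, and falls back to byte tokens on a miss. The sketch below simplifies this to pure greedy longest-match over a toy vocabulary, without the id tie-breaking or byte fallback.

```python
# Simplified greedy longest-match tokenizer; toy vocab, no byte fallback,
# and no smallest-token-id tie-breaking as in the real tokenizer above.
def greedy_tokenize(text, vocab, max_len):
    tokens, pos = [], 0
    while pos < len(text):
        # Try the longest candidate first, mirroring range(end, pos, -1) above.
        for end in range(min(len(text), pos + max_len), pos, -1):
            piece = text[pos:end]
            if piece in vocab:
                tokens.append(piece)
                pos = end
                break
        else:
            # No vocab hit: emit the single character and advance.
            tokens.append(text[pos])
            pos += 1
    return tokens

print(greedy_tokenize("lowest", {"low", "lowe", "est", "st"}, 4))
# ['lowe', 'st']
```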
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_UpperCAmelCase : Union[str, Any] = (720, 1280) # Height, Width
_UpperCAmelCase : str = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_UpperCAmelCase : Optional[Any] = 1 / 100
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : int = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : List[Any] = 250
def snake_case__ ( ) -> None:
_UpperCamelCase, _UpperCamelCase : List[Any] = get_dataset(UpperCamelCase ,UpperCamelCase )
for index in range(UpperCamelCase ):
_UpperCamelCase : List[str] = random.sample(range(len(UpperCamelCase ) ) ,4 )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[str] = update_image_and_anno(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,filter_scale=UpperCamelCase ,)
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase : List[str] = random_chars(32 )
_UpperCamelCase : List[str] = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
_UpperCamelCase : Any = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' ,UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
_UpperCamelCase : Any = []
for anno in new_annos:
_UpperCamelCase : List[Any] = anno[3] - anno[1]
_UpperCamelCase : int = anno[4] - anno[2]
_UpperCamelCase : int = anno[1] + width / 2
_UpperCamelCase : int = anno[2] + height / 2
_UpperCamelCase : Optional[Any] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(UpperCamelCase )
with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> tuple[list, list]:
_UpperCamelCase : List[str] = []
_UpperCamelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(UpperCamelCase ,'''*.txt''' ) ):
_UpperCamelCase : int = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
with open(UpperCamelCase ) as in_file:
_UpperCamelCase : Dict = in_file.readlines()
_UpperCamelCase : Tuple = os.path.join(UpperCamelCase ,f'''{label_name}.jpg''' )
_UpperCamelCase : Tuple = []
for obj_list in obj_lists:
_UpperCamelCase : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase : Tuple = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase : Any = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase : Tuple = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase : List[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = 0.0 ,) -> tuple[list, list, str]:
_UpperCamelCase : Optional[int] = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uinta )
_UpperCamelCase : str = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase : Dict = int(scale_x * output_size[1] )
_UpperCamelCase : Dict = int(scale_y * output_size[0] )
_UpperCamelCase : int = []
_UpperCamelCase : Union[str, Any] = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case__ ( UpperCamelCase ) -> str:
assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
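Each quadrant of the mosaic above applies a simple affine map to the normalized bbox coordinates; for the top-right quadrant, x is shifted past the split point and compressed into the remaining width while y is plainly scaled. A quick numeric check of that mapping, with illustrative values:

```python
# Worked example of the top-right-quadrant bbox mapping used above.
scale_x, scale_y = 0.5, 0.5
# Normalized source bbox: (xmin, ymin, xmax, ymax)
xmin, ymin, xmax, ymax = 0.2, 0.4, 0.6, 0.8
new_xmin = scale_x + xmin * (1 - scale_x)  # 0.5 + 0.2 * 0.5 = 0.6
new_ymin = ymin * scale_y                  # 0.4 * 0.5 = 0.2
new_xmax = scale_x + xmax * (1 - scale_x)  # 0.5 + 0.6 * 0.5 = 0.8
new_ymax = ymax * scale_y                  # 0.8 * 0.5 = 0.4
print(new_xmin, new_ymin, new_xmax, new_ymax)  # 0.6 0.2 0.8 0.4
```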
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ : Tuple ={
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] =[
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] =[
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
A_ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 274 |
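`_LazyModule` above defers the heavy torch/flax imports until an attribute is actually accessed. A stripped-down approximation of that behavior using module-level `__getattr__` (PEP 562) is sketched below; this is an assumption about the general technique, not transformers' actual implementation, and the package layout is hypothetical.

```python
# mypkg/__init__.py -- minimal lazy-import sketch (PEP 562); hypothetical
# package layout, not the real transformers._LazyModule.
import importlib

_import_structure = {"heavy_math": ["expensive_function"]}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # The submodule is imported only on first access.
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)
```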
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( a_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Union[str, Any] = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_UpperCamelCase : Optional[Any] = bertabert.config.encoder.vocab_size
_UpperCamelCase : List[str] = tokenizer.sep_token_id
_UpperCamelCase : List[str] = tokenizer.cls_token_id
_UpperCamelCase : Optional[Any] = 128
_UpperCamelCase : int = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
_UpperCamelCase : Dict = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
_UpperCamelCase : Dict = train_dataset.select(range(32 ) )
_UpperCamelCase : Tuple = val_dataset.select(range(16 ) )
_UpperCamelCase : Union[str, Any] = 4
def _map_to_encoder_decoder_inputs(_snake_case ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCamelCase : Optional[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_snake_case , max_length=512 )
_UpperCamelCase : Optional[int] = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_snake_case , max_length=128 )
_UpperCamelCase : str = inputs.input_ids
_UpperCamelCase : Union[str, Any] = inputs.attention_mask
_UpperCamelCase : str = outputs.input_ids
_UpperCamelCase : str = outputs.input_ids.copy()
_UpperCamelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
_UpperCamelCase : Union[str, Any] = outputs.attention_mask
assert all(len(_snake_case ) == 512 for x in inputs.input_ids )
assert all(len(_snake_case ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_snake_case ):
_UpperCamelCase : Dict = pred.label_ids
_UpperCamelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCamelCase : Any = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : Dict = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
_UpperCamelCase : int = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_snake_case ) )] ) / len(_snake_case )
return {"accuracy": accuracy}
# map train dataset
_UpperCamelCase : Optional[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
_UpperCamelCase : List[Any] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_snake_case , batch_size=_snake_case , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
_UpperCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCamelCase : Union[str, Any] = SeqaSeqTrainingArguments(
output_dir=_snake_case , per_device_train_batch_size=_snake_case , per_device_eval_batch_size=_snake_case , predict_with_generate=_snake_case , evaluation_strategy='''steps''' , do_train=_snake_case , do_eval=_snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCamelCase : Optional[int] = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , compute_metrics=_compute_metrics , train_dataset=_snake_case , eval_dataset=_snake_case , tokenizer=_snake_case , )
# start training
trainer.train()
| 683 | 0 |
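The `-100` substitution in `_map_to_encoder_decoder_inputs` above works because PyTorch's cross-entropy loss ignores targets equal to `ignore_index`, which defaults to -100, so padded label positions contribute nothing to the loss. A minimal demonstration:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(1, 4, 10)               # (batch, seq_len, vocab)
labels = torch.tensor([[3, 7, -100, -100]])  # last two positions were padding
# ignore_index defaults to -100, so only the first two positions are scored.
loss = F.cross_entropy(logits.view(-1, 10), labels.view(-1))
print(loss.item())
```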
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=1_0 , )
return model
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
UpperCamelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCamelCase = DDPMScheduler()
UpperCamelCase = AudioDiffusionPipeline(vqvae=_snake_case , unet=self.dummy_unet , mel=_snake_case , scheduler=_snake_case )
UpperCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
UpperCamelCase = torch.Generator(device=_snake_case ).manual_seed(4_2 )
UpperCamelCase = pipe(generator=_snake_case , steps=4 )
UpperCamelCase = output.audios[0]
UpperCamelCase = output.images[0]
UpperCamelCase = torch.Generator(device=_snake_case ).manual_seed(4_2 )
UpperCamelCase = pipe(generator=_snake_case , steps=4 , return_dict=_snake_case )
UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCamelCase = DDIMScheduler()
UpperCamelCase = self.dummy_vqvae_and_unet
UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_snake_case , scheduler=_snake_case )
UpperCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCamelCase = torch.Generator(device=_snake_case ).manual_seed(4_2 )
UpperCamelCase = pipe(raw_audio=_snake_case , generator=_snake_case , start_step=5 , steps=1_0 )
UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase = self.dummy_unet_condition
UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_snake_case , mel=_snake_case , scheduler=_snake_case )
UpperCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
np.random.seed(0 )
UpperCamelCase = torch.rand((1, 1, 1_0) )
UpperCamelCase = pipe(generator=_snake_case , encoding=_snake_case )
UpperCamelCase = output.images[0]
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = torch_device
UpperCamelCase = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
UpperCamelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
UpperCamelCase = torch.Generator(device=_snake_case ).manual_seed(4_2 )
UpperCamelCase = pipe(generator=_snake_case )
UpperCamelCase = output.audios[0]
UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCamelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 212 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def snake_case__ ( UpperCamelCase=None ) -> Optional[int]:
if subparsers is not None:
_UpperCamelCase : Dict = subparsers.add_parser('''env''' )
else:
_UpperCamelCase : Tuple = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' ,default=UpperCamelCase ,help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=UpperCamelCase )
return parser
def snake_case__ ( UpperCamelCase ) -> Any:
_UpperCamelCase : int = torch.__version__
_UpperCamelCase : int = torch.cuda.is_available()
_UpperCamelCase : List[str] = is_xpu_available()
_UpperCamelCase : Dict = is_npu_available()
_UpperCamelCase : Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCamelCase ):
_UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
_UpperCamelCase : List[Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(UpperCamelCase ),
'''PyTorch NPU available''': str(UpperCamelCase ),
'''System RAM''': f'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''',
}
if pt_cuda_available:
_UpperCamelCase : int = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
_UpperCamelCase : Union[str, Any] = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase ,UpperCamelCase )
else f'''\t{accelerate_config}'''
)
print(UpperCamelCase )
_UpperCamelCase : str = accelerate_config
return info
def snake_case__ ( ) -> int:
_UpperCamelCase : str = env_command_parser()
_UpperCamelCase : Any = parser.parse_args()
env_command(UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 0 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a__( a_ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = (DPMSolverSDEScheduler,)
UpperCAmelCase_ : Optional[int] = 1_0
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**_snake_case)
return config
def a_ ( self):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case)
def a_ ( self):
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case)
def a_ ( self):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_snake_case)
def a_ ( self):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps)
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case)
for i, t in enumerate(scheduler.timesteps):
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case)
lowerCAmelCase = model(_snake_case , _snake_case)
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case)
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case))
lowerCAmelCase = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562) < 1E-2
assert abs(result_mean.item() - 0.211619570851326) < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""")
lowerCAmelCase = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps)
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case)
for i, t in enumerate(scheduler.timesteps):
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case)
lowerCAmelCase = model(_snake_case , _snake_case)
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case)
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case))
lowerCAmelCase = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621) < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case)
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.to(_snake_case) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case)
lowerCAmelCase = model(_snake_case , _snake_case)
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case)
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case))
lowerCAmelCase = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562) < 1E-2
assert abs(result_mean.item() - 0.211619570851326) < 1E-3
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_snake_case , use_karras_sigmas=_snake_case)
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case)
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.to(_snake_case) * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(_snake_case)
for t in scheduler.timesteps:
lowerCAmelCase = scheduler.scale_model_input(_snake_case , _snake_case)
lowerCAmelCase = model(_snake_case , _snake_case)
lowerCAmelCase = scheduler.step(_snake_case , _snake_case , _snake_case)
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(_snake_case))
lowerCAmelCase = torch.mean(torch.abs(_snake_case))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
| 370 |
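All of the scheduler tests above exercise the same loop contract: `scale_model_input`, then the model forward pass, then `scheduler.step(...).prev_sample`. The toy stand-in below renders only that loop shape; it is not DPMSolverSDE, just a placeholder obeying the same interface.

```python
import torch

class ToyScheduler:
    """Trivial stand-in exposing the scheduler interface used in the tests above."""
    def __init__(self, steps):
        self.timesteps = torch.arange(steps - 1, -1, -1)

    def scale_model_input(self, sample, t):
        return sample  # real schedulers rescale by the noise level here

    def step(self, model_output, t, sample):
        # Nudge the sample toward the model's prediction of the clean signal.
        return type("Out", (), {"prev_sample": sample - 0.1 * model_output})

scheduler = ToyScheduler(steps=10)
sample = torch.randn(1, 3)
model = lambda x, t: x  # stand-in denoiser
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    sample = scheduler.step(model(model_input, t), t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3])
```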
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def snake_case__ ( UpperCamelCase ) -> Tuple:
_UpperCamelCase : str = '''huggingface/label-files'''
_UpperCamelCase : Optional[Any] = '''imagenet-1k-id2label.json'''
_UpperCamelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase ,UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) )
_UpperCamelCase : Optional[int] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_UpperCamelCase : Dict = {v: k for k, v in idalabel.items()}
_UpperCamelCase : Optional[Any] = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_UpperCamelCase : Union[str, Any] = BitConfig(
conv_layer=UpperCamelCase ,num_labels=10_00 ,idalabel=UpperCamelCase ,labelaid=UpperCamelCase ,)
return config
def snake_case__ ( UpperCamelCase ) -> str:
if "stem.conv" in name:
_UpperCamelCase : Any = name.replace('''stem.conv''' ,'''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase : Union[str, Any] = name.replace('''blocks''' ,'''layers''' )
if "head.fc" in name:
_UpperCamelCase : Optional[Any] = name.replace('''head.fc''' ,'''classifier.1''' )
if name.startswith('''norm''' ):
_UpperCamelCase : Any = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
_UpperCamelCase : List[Any] = '''bit.encoder.''' + name
return name
def snake_case__ ( ) -> Optional[int]:
_UpperCamelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_UpperCamelCase : List[str] = Image.open(requests.get(UpperCamelCase ,stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[Any]:
_UpperCamelCase : str = get_config(UpperCamelCase )
# load original model from timm
_UpperCamelCase : int = create_model(UpperCamelCase ,pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
_UpperCamelCase : int = timm_model.state_dict()
for key in state_dict.copy().keys():
_UpperCamelCase : int = state_dict.pop(UpperCamelCase )
_UpperCamelCase : Any = val.squeeze() if '''head''' in key else val
# load HuggingFace model
_UpperCamelCase : List[str] = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
_UpperCamelCase : Optional[int] = create_transform(**resolve_data_config({} ,model=UpperCamelCase ) )
_UpperCamelCase : Any = transform.transforms
_UpperCamelCase : List[str] = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_UpperCamelCase : List[str] = BitImageProcessor(
do_resize=UpperCamelCase ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=UpperCamelCase ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=UpperCamelCase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_UpperCamelCase : str = prepare_img()
_UpperCamelCase : Dict = transform(UpperCamelCase ).unsqueeze(0 )
_UpperCamelCase : Dict = processor(UpperCamelCase ,return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase ,UpperCamelCase )
# verify logits
with torch.no_grad():
_UpperCamelCase : Optional[int] = model(UpperCamelCase )
_UpperCamelCase : Optional[int] = outputs.logits
print('''Logits:''' ,logits[0, :3] )
print('''Predicted class:''' ,model.config.idalabel[logits.argmax(-1 ).item()] )
_UpperCamelCase : List[Any] = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase ,outputs.logits ,atol=1e-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 683 | 0 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] = "" ) -> dict[str, float]:
"""simple docstring"""
a_ : List[Any] = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
a_ : Dict = BeautifulSoup(requests.get(__A ).text , 'html.parser' )
a_ : int = soup.find_all('td' , attrs='titleColumn' )
a_ : List[Any] = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__A , __A )
}
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] = "IMDb_Top_250_Movies.csv" ) -> None:
"""simple docstring"""
a_ : Optional[int] = get_imdb_top_aaa_movies()
with open(__A , 'w' , newline='' ) as out_file:
a_ : int = csv.writer(__A )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 570 |
'''simple docstring'''
_UpperCAmelCase : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def snake_case__ ( UpperCamelCase ) -> int:
_UpperCamelCase : Any = 0
while number:
# Speed is increased slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Two chains are formed:
# one ends in 89, and within it 58 is the member that, when marked first,
# minimizes the number of iterations needed to check all members;
# the other ends in 1 and contains only the single element 1.
# So 58 and 1 are marked at the start.
# A dictionary was changed to an array to speed up the solution.
_UpperCAmelCase : list[bool | None] = [None] * 10000000
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = False
def snake_case__ ( UpperCamelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCamelCase : List[str] = chain(next_number(UpperCamelCase ) )
_UpperCamelCase : Tuple = number_chain
while number < 10_00_00_00:
_UpperCamelCase : int = number_chain
number *= 10
return number_chain
def snake_case__ ( UpperCamelCase = 10_00_00_00 ) -> int:
for i in range(1 ,UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
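The memoized solution above can be sanity-checked against a direct, unmemoized walk of each digit-square chain; every chain provably ends at 1 or at 89:

```python
def ends_in_89(n: int) -> bool:
    # Follow the digit-square chain until it reaches one of its two endpoints.
    while n != 1 and n != 89:
        n = sum(int(d) ** 2 for d in str(n))
    return n == 89

# Small-range sanity check (the full problem uses 10_000_000).
print(sum(ends_in_89(i) for i in range(1, 100)))  # 80
```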
def lowercase ( SCREAMING_SNAKE_CASE ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
while is_sorted is False: # Until all the indices are traversed keep looping
SCREAMING_SNAKE_CASE_ = True
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_ = False
for i in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ = input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_ = False
return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
SCREAMING_SNAKE_CASE__ : Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
SCREAMING_SNAKE_CASE__ : Union[str, Any] = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 205 |
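A quick randomized check that the odd-even (brick) sort above agrees with Python's built-in sort; this assumes the function is exposed as `odd_even_sort`, the name used in the script's own `__main__` block:

```python
import random

# Randomized agreement check; assumes odd_even_sort from the script above.
for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert odd_even_sort(list(data)) == sorted(data)
print("odd_even_sort matches sorted() on random inputs")
```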
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the name of the week day for a given date, via the Doomsday algorithm."""
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
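Two hand-checked dates make a useful regression test for the leap-century branch above; both weekdays are matters of record:

assert get_week_day(2000, 1, 1) == "Saturday"  # 2000 is a leap year (divisible by 400)
assert get_week_day(1900, 1, 1) == "Monday"    # 1900 is not (divisible by 100 only)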
| 683 | 0 |
import pytest
_lowerCAmelCase : List[Any] ="""__dummy_dataset1__"""
_lowerCAmelCase : Optional[int] ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 113 |
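A hypothetical test consuming these fixtures might look like the sketch below. The test name and assertion are illustrative, and load_dataset will hit the network for the JSONL files referenced by the dummy script:

from datasets import load_dataset

def test_dummy_dataset_script(dataset_loading_script_dir):
    # pytest injects the fixture; the directory holds __dummy_dataset1__.py
    dataset = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in dataset.column_names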
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
"""simple docstring"""
@staticmethod
def _lowercase ( *_snake_case , **_snake_case ) -> str:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Any = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def _lowercase ( self , _snake_case , _snake_case ) -> List[str]:
_UpperCamelCase : int = vqa_pipeline(_snake_case , top_k=1 )
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
[{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}],
] , )
@require_torch
def _lowercase ( self ) -> Tuple:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[int] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
_UpperCamelCase : List[Any] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}, {'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case )}] )
@slow
@require_torch
def _lowercase ( self ) -> List[Any]:
_UpperCamelCase : Any = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
_UpperCamelCase : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCamelCase : Optional[Any] = '''How many cats are there?'''
_UpperCamelCase : str = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : str = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )
_UpperCamelCase : Dict = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''' )
def _lowercase ( self ) -> List[Any]:
pass
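Outside the test harness, the same pipeline is used directly. A minimal sketch with the public checkpoint exercised by the slow test above (the printed scores are illustrative):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # e.g. [{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]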
| 683 | 0 |
def different_signs(num_a: int, num_b: int) -> bool:
    """Return True iff the two integers have opposite signs."""
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
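The trick works because two's-complement integers keep the sign in the top bit, so the XOR of two integers is negative exactly when their sign bits differ:

assert different_signs(1, -1) is True
assert different_signs(-3, -7) is False
assert different_signs(0, 5) is False  # zero is treated as non-negative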
| 315 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++-level warnings
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 683 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def _lowercase( ):
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=__a , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=__a , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def _lowercase( __a : List[str] ):
a__ ={}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__ =bool(qa['answers']['text'] )
return qid_to_has_ans
def _lowercase( __a : List[str] ):
def remove_articles(__a : int ):
return ARTICLES_REGEX.sub(' ' , __a )
def white_space_fix(__a : List[str] ):
return " ".join(text.split() )
def remove_punc(__a : int ):
a__ =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__a : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) )
def _lowercase( __a : Dict ):
if not s:
return []
return normalize_answer(__a ).split()
def _lowercase( __a : Union[str, Any] , __a : List[str] ):
return int(normalize_answer(__a ) == normalize_answer(__a ) )
def _lowercase( __a : List[str] , __a : Dict ):
a__ =get_tokens(__a )
a__ =get_tokens(__a )
a__ =collections.Counter(__a ) & collections.Counter(__a )
a__ =sum(common.values() )
if len(__a ) == 0 or len(__a ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
a__ =1.0 * num_same / len(__a )
a__ =1.0 * num_same / len(__a )
a__ =(2 * precision * recall) / (precision + recall)
return fa
def _lowercase( __a : Union[str, Any] , __a : Optional[Any] ):
a__ ={}
a__ ={}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
a__ =qa['''id''']
a__ =[t for t in qa['''answers''']['''text'''] if normalize_answer(__a )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
a__ =['''''']
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
a__ =preds[qid]
# Take max over all gold answers
a__ =max(compute_exact(__a , __a ) for a in gold_answers )
a__ =max(compute_fa(__a , __a ) for a in gold_answers )
return exact_scores, fa_scores
def _lowercase( __a : str , __a : List[str] , __a : Dict , __a : Union[str, Any] ):
a__ ={}
for qid, s in scores.items():
a__ =na_probs[qid] > na_prob_thresh
if pred_na:
a__ =float(not qid_to_has_ans[qid] )
else:
a__ =s
return new_scores
def _lowercase( __a : List[Any] , __a : List[Any] , __a : List[Any]=None ):
if not qid_list:
a__ =len(__a )
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores.values() ) / total),
('f1', 1_00.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
a__ =len(__a )
return collections.OrderedDict(
[
('exact', 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def _lowercase( __a : Union[str, Any] , __a : Tuple , __a : Optional[int] ):
for k in new_eval:
a__ =new_eval[k]
def _lowercase( __a : Tuple , __a : Tuple , __a : str , __a : List[Any] ):
plt.step(__a , __a , color='b' , alpha=0.2 , where='post' )
plt.fill_between(__a , __a , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__a )
plt.savefig(__a )
plt.clf()
def _lowercase( __a : Optional[int] , __a : Optional[Any] , __a : List[str] , __a : Any , __a : str=None , __a : str=None ):
a__ =sorted(__a , key=lambda __a : na_probs[k] )
a__ =0.0
a__ =1.0
a__ =0.0
a__ =[1.0]
a__ =[0.0]
a__ =0.0
for i, qid in enumerate(__a ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
a__ =true_pos / float(i + 1 )
a__ =true_pos / float(__a )
if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__a )
recalls.append(__a )
if out_image:
plot_pr_curve(__a , __a , __a , __a )
return {"ap": 1_00.0 * avg_prec}
def _lowercase( __a : Optional[int] , __a : Optional[int] , __a : Dict , __a : Any , __a : str , __a : Optional[int] ):
if out_image_dir and not os.path.exists(__a ):
os.makedirs(__a )
a__ =sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
a__ =make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
a__ =make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
a__ ={k: float(__a ) for k, v in qid_to_has_ans.items()}
a__ =make_precision_recall_eval(
__a , __a , __a , __a , out_image=os.path.join(__a , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(__a , __a , 'pr_exact' )
merge_eval(__a , __a , 'pr_f1' )
merge_eval(__a , __a , 'pr_oracle' )
def _lowercase( __a : Union[str, Any] , __a : List[Any] , __a : Any , __a : Union[str, Any] ):
if not qid_list:
return
a__ =[na_probs[k] for k in qid_list]
a__ =np.ones_like(__a ) / float(len(__a ) )
plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(__a , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def _lowercase( __a : Dict , __a : str , __a : Dict , __a : str ):
a__ =sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
a__ =num_no_ans
a__ =cur_score
a__ =0.0
a__ =sorted(__a , key=lambda __a : na_probs[k] )
for i, qid in enumerate(__a ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
a__ =scores[qid]
else:
if preds[qid]:
a__ =-1
else:
a__ =0
cur_score += diff
if cur_score > best_score:
a__ =cur_score
a__ =na_probs[qid]
return 1_00.0 * best_score / len(__a ), best_thresh
def _lowercase( __a : List[Any] , __a : str , __a : Tuple , __a : Optional[int] , __a : Union[str, Any] , __a : Optional[Any] ):
a__ =find_best_thresh(__a , __a , __a , __a )
a__ =find_best_thresh(__a , __a , __a , __a )
a__ =best_exact
a__ =exact_thresh
a__ =best_fa
a__ =fa_thresh
def _lowercase( ):
with open(OPTS.data_file ) as f:
a__ =json.load(__a )
a__ =dataset_json['''data''']
with open(OPTS.pred_file ) as f:
a__ =json.load(__a )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
a__ =json.load(__a )
else:
a__ ={k: 0.0 for k in preds}
a__ =make_qid_to_has_ans(__a ) # maps qid to True/False
a__ =[k for k, v in qid_to_has_ans.items() if v]
a__ =[k for k, v in qid_to_has_ans.items() if not v]
a__ =get_raw_scores(__a , __a )
a__ =apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
a__ =apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh )
a__ =make_eval_dict(__a , __a )
if has_ans_qids:
a__ =make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , 'HasAns' )
if no_ans_qids:
a__ =make_eval_dict(__a , __a , qid_list=__a )
merge_eval(__a , __a , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(__a , __a , __a , __a , __a , __a )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir )
histogram_na_prob(__a , __a , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(__a , __a , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(__a , __a )
else:
print(json.dumps(__a , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
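To make the token-level F1 above concrete, here is the same computation unrolled on a toy prediction (the answer strings are illustrative):

from collections import Counter

gold = "the cat sat".split()        # normalized gold-answer tokens
pred = "a cat sat down".split()     # normalized predicted tokens
common = Counter(gold) & Counter(pred)
num_same = sum(common.values())     # 2 ("cat" and "sat")
precision = num_same / len(pred)    # 2/4
recall = num_same / len(gold)       # 2/3
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 3))                 # 0.571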
| 20 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="attention" ) -> List[str]:
_UpperCamelCase : Dict = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_UpperCamelCase : int = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCamelCase : str = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_UpperCamelCase : Tuple = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_UpperCamelCase : Any = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_UpperCamelCase : Optional[int] = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCamelCase : Optional[Any] = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_UpperCamelCase : List[Any] = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> List[str]:
if split_mlp_wi:
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_UpperCamelCase : Tuple = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_UpperCamelCase : Optional[Any] = (wi_a, wi_a)
else:
_UpperCamelCase : str = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_UpperCamelCase : int = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def snake_case__ ( UpperCamelCase ,*, UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ) -> int:
_UpperCamelCase : Any = traverse_util.flatten_dict(variables['''target'''] )
_UpperCamelCase : Optional[Any] = {'''/'''.join(UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCamelCase : str = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,UpperCamelCase )
_UpperCamelCase : Optional[int] = collections.OrderedDict()
# Shared embeddings.
_UpperCamelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''attention''' )
_UpperCamelCase : Tuple = layer_norm
_UpperCamelCase : int = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : Dict = v.T
# Block i, layer 1 (MLP).
_UpperCamelCase : Dict = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : int = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''encoder''' ,UpperCamelCase )
_UpperCamelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Optional[Any] = wi[1].T
else:
_UpperCamelCase : List[Any] = wi.T
_UpperCamelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup(
UpperCamelCase ,UpperCamelCase ,'''encoder''' ).T
_UpperCamelCase : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_UpperCamelCase : List[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''encoder''' ).T
_UpperCamelCase : Optional[Any] = tax_relpos_bias_lookup(
UpperCamelCase ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : List[Any] = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''self_attention''' )
_UpperCamelCase : int = layer_norm
_UpperCamelCase : Union[str, Any] = k.T
_UpperCamelCase : Optional[int] = o.T
_UpperCamelCase : Dict = q.T
_UpperCamelCase : Tuple = v.T
# Block i, layer 1 (Cross Attention).
_UpperCamelCase : str = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Dict = tax_attention_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''encoder_decoder_attention''' )
_UpperCamelCase : Dict = layer_norm
_UpperCamelCase : Optional[int] = k.T
_UpperCamelCase : int = o.T
_UpperCamelCase : List[Any] = q.T
_UpperCamelCase : str = v.T
# Block i, layer 2 (MLP).
_UpperCamelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_UpperCamelCase, _UpperCamelCase : List[Any] = tax_mlp_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ,UpperCamelCase )
_UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
_UpperCamelCase : Optional[Any] = wi[0].T
_UpperCamelCase : Union[str, Any] = wi[1].T
else:
_UpperCamelCase : Dict = wi.T
_UpperCamelCase : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCamelCase : int = tax_relpos_bias_lookup(UpperCamelCase ,UpperCamelCase ,'''decoder''' ).T
_UpperCamelCase : Optional[int] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCamelCase : str = old['''decoder/logits_dense/kernel'''].T
return new
def snake_case__ ( UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
_UpperCamelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : str = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCamelCase : int = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_UpperCamelCase : Any = state_dict['''shared.weight''']
return state_dict
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
    _UpperCamelCase : List[Any] = checkpoints.load_t5x_checkpoint(UpperCamelCase )
_UpperCamelCase : str = convert_tax_to_pytorch(
UpperCamelCase ,num_layers=config.num_layers ,is_encoder_only=UpperCamelCase ,scalable_attention=UpperCamelCase )
_UpperCamelCase : Optional[Any] = make_state_dict(UpperCamelCase ,UpperCamelCase )
model.load_state_dict(UpperCamelCase ,strict=UpperCamelCase )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
_UpperCamelCase : int = MTaConfig.from_json_file(UpperCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCamelCase : Optional[int] = UMTaEncoderModel(UpperCamelCase )
else:
_UpperCamelCase : Optional[int] = UMTaForConditionalGeneration(UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase )
print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
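The attention lookups above fold T5X's per-head kernels of shape (d_model, num_heads, head_dim) into the 2-D weight matrices PyTorch linear layers expect. A minimal numpy illustration of that reshape (the dimensions are assumptions; real values come from the checkpoint):

import numpy as np

d_model, num_heads, head_dim = 512, 8, 64
k = np.zeros((d_model, num_heads, head_dim))           # one layer's key kernel in T5X layout
k_2d = k.reshape(k.shape[0], k.shape[1] * k.shape[2])  # fold the head axes: (512, 512)
assert k_2d.T.shape == (num_heads * head_dim, d_model) # transposed for torch's (out, in) layout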
| 683 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
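As with any PretrainedConfig subclass, the config can be instantiated directly with its defaults; a short usage sketch:

config = FocalNetConfig()
print(config.model_type)   # "focalnet"
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']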
| 399 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
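partition encodes each multiset of primes as the product of its members, which is a faithful key because prime factorization is unique, so the size of the returned set equals the number of distinct prime partitions. The classic coin-change DP gives an independent cross-check; a sketch reusing the primes set above:

def prime_partition_counts(limit: int) -> list[int]:
    ways = [1] + [0] * limit
    for p in sorted(primes):
        for n in range(p, limit + 1):
            ways[n] += ways[n - p]
    return ways  # ways[n] == number of prime partitions of n

counts = prime_partition_counts(100)
print(next(n for n, c in enumerate(counts) if c > 5000))  # 71 (Project Euler 77)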
| 683 | 0 |
'''simple docstring'''
import copy
import re
class TrialShortNamer:
    '''simple docstring'''

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
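A small usage sketch; the subclass and parameter names below are assumptions, and only non-default values end up in the generated name:

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}

short = RunNamer.shortname({"learning_rate": 5e-4, "batch_size": 32})
print(short)                       # "run_lr0.0005": batch_size stays at its default
print(RunNamer.parse_repr(short))  # round-trips to {"learning_rate": 0.0005, "batch_size": 32}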
| 565 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = """bart"""
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
if LOAD_DENSE_INDEX:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
_UpperCamelCase : Optional[Any] = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
_UpperCamelCase : Tuple = qar_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Any = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
_UpperCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
_UpperCamelCase : Dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
_UpperCamelCase : Tuple = sas_model.eval()
else:
_UpperCamelCase, _UpperCamelCase : Optional[Any] = make_qa_sas_model(
model_name='''t5-small''' ,from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' ,device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
if LOAD_DENSE_INDEX:
_UpperCamelCase : str = faiss.StandardGpuResources()
_UpperCamelCase : Optional[int] = datasets.load_dataset(path='''wiki_snippets''' ,name='''wiki40b_en_100_0''' )['''train''']
_UpperCamelCase : List[str] = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(wikiaab_passages.num_rows, 1_28) ,)
_UpperCamelCase : Any = faiss.IndexFlatIP(1_28 )
_UpperCamelCase : str = faiss.index_cpu_to_gpu(UpperCamelCase ,1 ,UpperCamelCase )
wikiaab_gpu_index_flat.add(UpperCamelCase ) # TODO fix for larger GPU
else:
_UpperCamelCase, _UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : int = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
_UpperCamelCase : List[Any] = datasets.load_dataset('''eli5''' ,name='''LFQA_reddit''' )
_UpperCamelCase : Optional[int] = elia['''train_eli5''']
_UpperCamelCase : Any = np.memmap(
'''eli5_questions_reps.dat''' ,dtype='''float32''' ,mode='''r''' ,shape=(elia_train.num_rows, 1_28) )
_UpperCamelCase : Optional[Any] = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCamelCase )
return (elia_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
_UpperCamelCase : Optional[int] = embed_questions_for_retrieval([question] ,UpperCamelCase ,UpperCamelCase )
_UpperCamelCase, _UpperCamelCase : Optional[Any] = eli5_train_q_index.search(UpperCamelCase ,UpperCamelCase )
_UpperCamelCase : Optional[Any] = [elia_train[int(UpperCamelCase )] for i in I[0]]
return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
if source == "none":
_UpperCamelCase, _UpperCamelCase : Dict = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase, _UpperCamelCase : str = query_qa_dense_index(
UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
else:
_UpperCamelCase, _UpperCamelCase : str = query_es_index(
UpperCamelCase ,UpperCamelCase ,index_name='''english_wiki40b_snippets_100w''' ,n_results=UpperCamelCase ,)
_UpperCamelCase : Optional[int] = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
_UpperCamelCase : Optional[Any] = '''question: {} context: {}'''.format(UpperCamelCase ,UpperCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCamelCase : None),
} )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0",
        )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_UpperCAmelCase : str = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_UpperCAmelCase : Tuple = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_UpperCAmelCase : Dict = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_UpperCAmelCase : List[str] = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_UpperCAmelCase : Optional[int] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_UpperCAmelCase : List[str] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_UpperCAmelCase : List[Any] = action_list.index(action_st)
_UpperCAmelCase : Tuple = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_UpperCAmelCase : Optional[Any] = show_type == """Show full text of passages"""
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : str = True
_UpperCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_UpperCAmelCase : Optional[Any] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_UpperCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_UpperCAmelCase : Dict = """wiki40b"""
_UpperCAmelCase : str = """dense"""
_UpperCAmelCase : List[str] = """beam"""
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = 64
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = st.sidebar.checkbox("""Generation options""")
if generate_options:
_UpperCAmelCase : Union[str, Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_UpperCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_UpperCAmelCase : Dict = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_UpperCAmelCase : List[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_UpperCAmelCase : List[str] = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[Any] = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_UpperCAmelCase : Optional[int] = None
# start main text
_UpperCAmelCase : Union[str, Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_UpperCAmelCase : int = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_UpperCAmelCase : Any = st.text_input("""Enter your question here:""", """""")
else:
_UpperCAmelCase : Tuple = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_UpperCAmelCase , _UpperCAmelCase : str = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_UpperCAmelCase : int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_UpperCAmelCase : int = support_list[:10]
_UpperCAmelCase : Tuple = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_UpperCAmelCase , _UpperCAmelCase : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_UpperCAmelCase : Tuple = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_UpperCAmelCase : List[Any] = res[1].strip()
if sec_titles == "":
_UpperCAmelCase : Optional[int] = """[{}]({})""".format(res[0], wiki_url)
else:
_UpperCAmelCase : Optional[int] = sec_titles.split(""" & """)
_UpperCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_UpperCAmelCase : Dict = find_nearest_training(question)
_UpperCAmelCase : List[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_UpperCAmelCase : List[Any] = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_UpperCAmelCase : List[Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
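At its core the dense retriever above is a max-inner-product search over precomputed 128-dimensional passage embeddings. A self-contained faiss sketch with random stand-in vectors (the dimension matches the memmapped passage matrix above):

import faiss
import numpy as np

dim = 128
index = faiss.IndexFlatIP(dim)                          # max inner-product search
passages = np.random.rand(1000, dim).astype("float32")  # stand-in for real passage embeddings
index.add(passages)
query = np.random.rand(1, dim).astype("float32")        # stand-in for an embedded question
scores, ids = index.search(query, 10)                   # top-10 supporting passages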
| 683 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 644 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class Node:
    """simple docstring"""

    def __init__(self, value: Any = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    """simple docstring"""

    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! Please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
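Assuming the restored method names above, a small illustration of find_kth_smallest on the demo values:

t = BinarySearchTree()
t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
assert t.find_kth_smallest(1, t.root) == 1  # smallest
assert t.find_kth_smallest(5, t.root) == 7  # 5th smallest of 1, 3, 4, 6, 7, 8, 10, 13, 14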
| 683 | 0 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # pad positions are replaced by -100 so the loss ignores them
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
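# The list comprehension above relies on the -100 convention: positions whose label
# is -100 are ignored by PyTorch's cross-entropy loss. A minimal sketch of that
# masking on its own, with a hypothetical pad_token_id of 0:
def mask_pad_tokens(label_ids, pad_token_id=0):
    # pad positions contribute nothing to the loss
    return [[-100 if token == pad_token_id else token for token in labels] for labels in label_ids]


# Example: mask_pad_tokens([[5, 7, 0, 0]]) == [[5, 7, -100, -100]]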
| 274 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs)


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency)

        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')

        if 'past_key_values' in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-3
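# A small sketch of the attribute_map aliasing declared above: PretrainedConfig
# resolves `hidden_size` to `d_model` and `num_attention_heads` to
# `encoder_attention_heads`. The numbers are simply the defaults from the signature;
# the guard is illustrative since this module is normally only imported.
if __name__ == "__main__":
    config = WhisperConfig()
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 4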
| 683 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
                }),
            reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ],
        )

    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        '''simple docstring'''
        iou_result = mean_iou(results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels)
        return iou_result
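# A tiny worked example of the histogram-based counting in intersect_and_union:
# class 0 agrees everywhere, while classes 1 and 2 each have one mismatched pixel,
# so their IoU drops to 0.5. Values are illustrative.
if __name__ == "__main__":
    pred = np.array([[0, 1], [2, 2]])
    gt = np.array([[0, 1], [2, 1]])
    area_intersect, area_union, _, _ = intersect_and_union(pred, gt, num_labels=3, ignore_index=255)
    print(area_intersect / area_union)  # -> [1.0, 0.5, 0.5]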
| 212 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}
    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head #
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
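# The block-copy loop above maps teacher layers [0, 2, 4, 7, 9, 11] onto student
# slots 0..5. A minimal sketch of that index mapping on its own (names illustrative):
def build_layer_map(teacher_layers=(0, 2, 4, 7, 9, 11)):
    # student layer i takes its weights from teacher layer teacher_layers[i]
    return {std_idx: teacher_idx for std_idx, teacher_idx in enumerate(teacher_layers)}


# Example: build_layer_map()[5] == 11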
| 683 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    hub_checkpoint = 'ssube/stable-diffusion-x4-upscaler-onnx'

    def get_dummy_inputs(self, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1E-1

    def test_pipeline_pndm(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    @property
    def gpu_provider(self):
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        """simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""", provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = '''A fantasy landscape, trending on artstation'''

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    def test_inference_k_lms(self):
        """simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""", subfolder="""scheduler""")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)

        prompt = '''A fantasy landscape, trending on artstation'''

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np")
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
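# Every test above uses the same numeric-regression pattern: freeze a small slice of
# the output image and compare it to reference values within a tolerance. A
# standalone sketch of that check (tolerance and numbers are illustrative):
def assert_slice_close(actual, expected, atol=1e-1):
    assert np.abs(np.asarray(actual) - np.asarray(expected)).max() < atol


# Example: assert_slice_close([0.70, 0.78], [0.6974782, 0.7804545])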
| 370 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    """simple docstring"""

    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1E-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1E-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1E-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float('''inf''')])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
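# A compact numpy sketch of nucleus (top-p) filtering, the behaviour exercised in
# test_top_p_dist_warper above: keep the smallest set of tokens whose cumulative
# probability reaches top_p and mask the rest. Names and thresholds are illustrative.
def top_p_filter(logits, top_p=0.75, filter_value=-float("inf")):
    order = np.argsort(logits)[::-1]  # tokens sorted by decreasing logit
    probs = np.exp(logits[order]) / np.exp(logits[order]).sum()
    cutoff = np.searchsorted(np.cumsum(probs), top_p) + 1
    filtered = np.full(logits.shape, filter_value)
    filtered[order[:cutoff]] = logits[order[:cutoff]]
    return filtered


# Example: top_p_filter(np.log(np.array([0.3, 0.1, 0.1, 0.5]))) keeps only the 0.5 and 0.3 entries.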
| 683 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
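# Minimal usage sketch for the wrapper above: with this signature the flag comes first,
# so the bar renders only on the local main process under distributed launches.
# Illustrative only; the module is normally imported from within the package.
#
#     for batch in tqdm(True, range(100), desc="training"):
#         ...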
| 570 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize('''path''', ['''paws''', '''csv'''])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''')
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''')
@pytest.mark.parametrize('''path''', ['''accuracy'''])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    '''path, config_name, expected_splits''', [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ],)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''', [
        ('''paws''', None, ValueError),
    ],)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    '''path, expected''', [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ],)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''', [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ],)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''', [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ],)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    '''path, config_name, expected_exception''', [
        ('''paws''', None, ValueError),
    ],)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
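# A short runnable sketch of the two most common inspection calls tested above;
# both hit the Hugging Face Hub, so network access is assumed.
if __name__ == "__main__":
    print(get_dataset_config_names("squad"))  # ['plain_text']
    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']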
| 683 | 0 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    '''simple docstring'''
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    '''simple docstring'''
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    '''simple docstring'''
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    '''simple docstring'''
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    '''simple docstring'''
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    '''simple docstring'''
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    '''simple docstring'''
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    '''simple docstring'''
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    '''simple docstring'''
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    '''simple docstring'''
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    '''simple docstring'''
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    '''simple docstring'''
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    '''simple docstring'''
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    '''simple docstring'''
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \nequal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \nlength of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print("\nSurface Areas of various geometric shapes: \n")
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
    print(f"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
| 205 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), cross_attention_dim=10, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D'''), up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D'''), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype='''uint8''')[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
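# The audio-length assertions above follow from how Mel maps spectrogram columns to
# samples: an image of width W spans (W - 1) * hop_length audio samples. A tiny
# arithmetic sketch (the 512 hop length mirrors Mel's default and is illustrative):
def expected_audio_length(image_width, hop_length=512):
    # one hop between consecutive spectrogram columns
    return (image_width - 1) * hop_length


# Example: expected_audio_length(64) == 32256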
| 683 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline( Pipeline ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__( self , images , **kwargs ):
        return super().__call__(images , **kwargs )

    def preprocess( self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs

    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )] | 113 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longt5"""] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_longt5"""] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
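# A self-contained sketch of the same lazy-loading idea using PEP 562 module
# __getattr__ (illustrative only; transformers uses its own _LazyModule class,
# so this is left as comments to avoid changing this module's behavior).
#
# import importlib
#
# _LAZY_ATTRS = {"LongT5Model": ".modeling_longt5"}  # attribute -> submodule
#
# def __getattr__(name):
#     if name in _LAZY_ATTRS:
#         module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")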
| 683 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ) ->Dict:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''tf_padding''' ) )
        self.parent.assertTrue(hasattr(config , '''depth_multiplier''' ) )
class MobileNetVaModelTester :
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , image_size=3_2 , depth_multiplier=0.25 , min_depth=8 , tf_padding=True , last_hidden_size=1_0_2_4 , output_stride=3_2 , hidden_act="relu6" , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=1_0 , scope=None , ) ->Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier )
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ) ->str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ) ->List[str]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ) ->Optional[int]:
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ) ->int:
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) ->str:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) ->List[Any]:
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
def __lowerCamelCase ( self : List[str] ) ->str:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __lowerCamelCase ( self : Optional[int] ) ->Optional[Any]:
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __lowerCamelCase ( self : Tuple ) ->Tuple:
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __lowerCamelCase ( self : Dict ) ->Optional[int]:
pass
def __lowerCamelCase ( self : Dict ) ->str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __lowerCamelCase ( self : Optional[Any] ) ->Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCamelCase ( self : Tuple ) ->Optional[int]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 2_6
            self.assertEqual(len(hidden_states ) , expected_num_stages )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def __lowerCamelCase ( self : List[Any] ) ->Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __lowerCamelCase ( self : Tuple ) ->Dict:
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) ->Optional[int]:
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __lowerCamelCase ( self : Dict ) ->Dict:
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_1) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
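# A hedged end-to-end sketch mirroring the integration test above; the
# checkpoint name comes from the test and the image path is a stand-in.
def classify_with_mobilenet(image_path="cat.png"):
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, AutoModelForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = AutoModelForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[int(logits.argmax(-1))]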
| 315 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_UpperCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> Optional[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
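# A quick usage sketch for the fast tokenizer above (requires the Hub
# checkpoint named here; any input text works).
if __name__ == "__main__":
    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    enc = tok("Hello world!")
    print(enc["input_ids"])
    print(tok.convert_ids_to_tokens(enc["input_ids"]))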
| 683 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')


class Node(Generic[T] ):
    def __init__( self , data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__( self) -> str:
        return F"""{self.data}"""


class LinkedStack(Generic[T] ):
    def __init__( self) -> None:
        self.top: Node[T] | None = None

    def __iter__( self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self) -> str:
        return "->".join([str(item) for item in self])

    def __len__( self) -> int:
        return len(tuple(iter(self)))

    def is_empty( self) -> bool:
        return self.top is None

    def push( self , item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top , Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek( self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear( self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
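    # Small demonstration of the stack above (LIFO: last pushed, first popped).
    stack = LinkedStack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)  # 3->2->1
    print(stack.pop())  # 3
    print(stack.peek())  # 2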
| 20 |
'''simple docstring'''
def odd_even_sort(input_list: list ) -> list:
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
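    # Sanity check: brick sort runs in O(n^2) worst case but sorts in place.
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]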
| 683 | 0 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = """bart"""
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
"""simple docstring"""
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
        sas_model.load_state_dict(save_dict['model'] )
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
"""simple docstring"""
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['''train''']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps ) # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
"""simple docstring"""
    eli5 = datasets.load_dataset('eli5' , name='LFQA_reddit' )
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question , n_results=10 ):
    """simple docstring"""
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    """simple docstring"""
    if source == "none":
        support_doc, hit_lst = (''' <P> '''.join(['' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wiki40b_passages , wiki40b_gpu_index_flat , n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question , es_client , index_name='english_wiki40b_snippets_100w' , n_results=n_results , )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question , support_doc )
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda __lowercase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowercase : None),
} )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.9_5 , temp=0.8 ):
    """simple docstring"""
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1024 , device='cuda:0' , )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
a__ : str ="""<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
a__ : Tuple ="""
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
a__ : Dict ="""
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
a__ : List[str] =[
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
a__ : Optional[int] =st.sidebar.checkbox('''Demo options''')
if demo_options:
a__ : List[str] =st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
a__ : List[Any] =action_list.index(action_st)
a__ : Tuple =st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
a__ : Optional[Any] =show_type == """Show full text of passages"""
else:
a__ : Union[str, Any] =3
a__ : str =True
a__ : str =st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
a__ : Optional[Any] ="""
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
a__ : str =st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
a__ : Optional[Any] =st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
a__ : Dict ="""wiki40b"""
a__ : str ="""dense"""
a__ : List[str] ="""beam"""
a__ : Dict =2
a__ : List[str] =64
a__ : List[Any] =256
a__ : Tuple =None
a__ : Union[str, Any] =None
a__ : int =st.sidebar.checkbox('''Generation options''')
if generate_options:
a__ : Union[str, Any] ="""
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
a__ : str =st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
a__ : Dict =st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
a__ : List[Any] =st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
a__ : List[str] =st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
a__ : Optional[Any] =st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
a__ : Optional[Any] =st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
a__ : Optional[int] =None
# start main text
a__ : Union[str, Any] =[
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
a__ : int =st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
a__ : Any =st.text_input('''Enter your question here:''', '''''')
else:
a__ : Tuple =question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
a__ : str =make_support(question, source=wiki_source, method='''dense''', n_results=10)
a__ : List[Any] =make_support(question, source=wiki_source, method='''sparse''', n_results=10)
a__ : int =[]
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
a__ : int =support_list[:10]
a__ : Tuple ="""<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
a__ : Union[str, Any] =make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
a__ : Any =answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
a__ : Tuple ="""https://en.wikipedia.org/wiki/{}""".format(res[0].replace(''' ''', '''_'''))
a__ : List[Any] =res[1].strip()
if sec_titles == "":
a__ : Optional[int] ="""[{}]({})""".format(res[0], wiki_url)
else:
a__ : Optional[int] =sec_titles.split(''' & ''')
a__ : Tuple =""" & """.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style=\"font-family:arial; font-size:10pt;\">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
a__ : Dict =find_nearest_training(question)
a__ : List[Any] =nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
a__ : List[Any] =[
"""{}. {}""".format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
a__ : List[Any] ="""
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
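# A self-contained sketch of the FAISS inner-product search pattern the app
# relies on (dimensions and data are illustrative, not the app's real index).
def _faiss_search_demo():
    import numpy as np

    index = faiss.IndexFlatIP(128)
    index.add(np.random.rand(1000, 128).astype("float32"))
    # returns (scores, ids) for the 10 nearest neighbours by inner product
    scores, ids = index.search(np.random.rand(1, 128).astype("float32"), 10)
    return scores, ids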
| 399 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint ,config ) -> Union[str, Any]:
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
    new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
    new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
    new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
    new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
    new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
    new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
    new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
    new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
    new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
    new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
    new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
    new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
    new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
    new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
    new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(num_up_blocks )
    }
for i in range(UpperCamelCase ):
_UpperCamelCase : Any = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Optional[int] = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
_UpperCamelCase : Dict = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Union[str, Any] = {'''old''': f'''down.{i}.block''', '''new''': f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[str] = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
_UpperCamelCase : Tuple = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : Optional[int] = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
_UpperCamelCase : List[str] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Tuple = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
_UpperCamelCase : List[str] = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
for i in range(UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = num_up_blocks - 1 - i
_UpperCamelCase : Optional[int] = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
_UpperCamelCase : Tuple = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
_UpperCamelCase : Any = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
_UpperCamelCase : Any = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Any = {'''old''': f'''up.{block_id}.block''', '''new''': f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : List[Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
_UpperCamelCase : Optional[Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
_UpperCamelCase : int = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
_UpperCamelCase : Optional[int] = renew_vae_resnet_paths(UpperCamelCase )
_UpperCamelCase : Optional[Any] = {'''old''': f'''mid.block_{i}''', '''new''': f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
_UpperCamelCase : Tuple = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
_UpperCamelCase : Tuple = renew_vae_attention_paths(UpperCamelCase )
_UpperCamelCase : Dict = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,additional_replacements=[meta_path] ,config=UpperCamelCase )
conv_attn_to_linear(UpperCamelCase )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path ,output_path ,) -> List[str]:
    # Only support V1
    r = requests.get(
        ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 5_12
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if checkpoint_path.endswith('''safetensors''' ):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path ,framework='''pt''' ,device='''cpu''' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path ,map_location=device )['''state_dict''']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config ,image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint ,vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
_UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_UpperCAmelCase : int = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
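# Example invocation of the conversion script above (the script name and
# paths are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers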
| 683 | 0 |
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i , k , i , n ):
    """simple docstring"""
    ds_b = sum(a_i[j] for j in range(k , len(a_i ) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i ) , k ) ) )
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b )
    if sub_memo is not None:
        jumps = sub_memo.get(c )
        if jumps is not None and len(jumps ) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps ) - 1 , -1 , -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k , len(a_i ) ) ):
                    new_c, a_i[j] = divmod(new_c , 10 )
                if new_c > 0:
                    add(new_c , k , a_i )
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i , k - 1 , i + dn , n )
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i , k , i + dn , n )
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps ):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j , (diff, dn, k) )
    return (diff, dn)
def compute(a_i , k , i , n ):
    """simple docstring"""
    if i >= n:
        return 0, i
    if k > len(a_i ):
        a_i.extend([0 for _ in range(k - len(a_i ) )] )
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i ) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k ):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s , 10 )
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(addend , k , a_i )
    return diff, i - start_i
def add(digits , k , addend ):
    """simple docstring"""
    for j in range(k , len(digits ) ):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s , 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend , 10 )
        digits.append(digit )
def solution(n = 10**15 ):
    """simple docstring"""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits , 20 , i + dn , n )
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits ) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 565 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor( ProcessorMixin ):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> List[Any]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> Dict:
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ) -> Tuple:
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ) -> Any:
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ) -> int:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
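# A hedged usage sketch for the processor above; the "BAAI/AltCLIP" checkpoint
# name is an assumption -- substitute whichever checkpoint ships this processor.
if __name__ == "__main__":
    from PIL import Image

    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
    print(inputs.keys())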
| 683 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
    """processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 644 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
OUTPUT_SIZE = (720, 1280) # Height, Width
SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR ,IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) ,4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths ,annos ,idxs ,OUTPUT_SIZE ,SCALE_RANGE ,filter_scale=FILTER_TINY_SCALE ,)
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cva.imwrite(f'''{file_root}.jpg''' ,new_image ,[cva.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj )
        with open(f'''{file_root}.txt''' ,'''w''' ) as outfile:
            outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir ,img_dir ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir ,'''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' ,1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir ,f'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list ,all_annos ,idxs ,output_size ,scale_range ,filter_scale = 0.0 ,) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3] ,dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
for i, index in enumerate(UpperCamelCase ):
_UpperCamelCase : Optional[int] = all_img_list[index]
path_list.append(UpperCamelCase )
_UpperCamelCase : str = all_annos[index]
_UpperCamelCase : Tuple = cva.imread(UpperCamelCase )
if i == 0: # top-left
_UpperCamelCase : Any = cva.resize(UpperCamelCase ,(divid_point_x, divid_point_y) )
_UpperCamelCase : Any = img
for bbox in img_annos:
_UpperCamelCase : List[Any] = bbox[1] * scale_x
_UpperCamelCase : Dict = bbox[2] * scale_y
_UpperCamelCase : Any = bbox[3] * scale_x
_UpperCamelCase : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase : Union[str, Any] = cva.resize(UpperCamelCase ,(output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase : List[Any] = img
for bbox in img_annos:
_UpperCamelCase : Any = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Optional[Any] = bbox[2] * scale_y
_UpperCamelCase : Any = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase : Dict = cva.resize(UpperCamelCase ,(divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : List[str] = img
for bbox in img_annos:
_UpperCamelCase : int = bbox[1] * scale_x
_UpperCamelCase : Optional[Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : int = bbox[3] * scale_x
_UpperCamelCase : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase : Dict = cva.resize(
UpperCamelCase ,(output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase : Union[str, Any] = img
for bbox in img_annos:
_UpperCamelCase : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase : List[str] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
        new_anno = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def random_chars(number_char ) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 683 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """tokenizer_config_file""": """tokenizer_config.json""",
    """merges_file""": """merges.txt""",
}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
A_ : Dict ="""</w>"""
A_ : Optional[Any] ="""@@ """
def snake_case_ ( __snake_case : str) -> Any:
lowerCAmelCase_ = set()
lowerCAmelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCAmelCase_ = char
return pairs
# Speech2Text2 has no max input length
A_ : Optional[Any] ={"""facebook/s2t-wav2vec2-large-en-de""": 10_24}
class Speech2Text2Tokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(F'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='''utf-8''' ) as merges_handle:
                merges = merges_handle.read().split('''\n''' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}

    @property
    def vocab_size( self ):
        return len(self.decoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '''\n''' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , '''''' )
        word = word.replace(''' ''' , BPE_TOKEN_VOCAB )
        # populate the cache so repeated tokens are resolved in O(1)
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding.'
                ' Make sure to provide a `merges.txt` file at instantiation to enable'
                ' encoding.' )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )

        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        result = self.decoder.get(index , self.unk_token )
        return result

    def convert_tokens_to_string( self , tokens ):
        string = ' '.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB ) )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
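# Minimal usage sketch (assumes network access to fetch the checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP above; the sample sentence is illustrative):
#
#   tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#   ids = tokenizer("hallo welt").input_ids  # encoding requires the merges.txt file
#   print(tokenizer.decode(ids, skip_special_tokens=True))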
| 274 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class UpperCAmelCase ( TestCasePlus ):
    """simple docstring"""

    @slow
    @require_torch
    def test_finetune_bert2bert( self ) -> None:
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128

        train_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'] , padding='max_length' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['highlights'] , padding='max_length' , truncation=True , max_length=128 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask

            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )

            return batch

        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )

            return {"accuracy": accuracy}
# map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        train_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=['article', 'highlights'] , )
        val_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )

        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy='steps' , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
# start training
trainer.train()
| 683 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowercase__ :
    '''simple docstring'''

    data: int
    left: lowercase__ | None = None
    right: lowercase__ | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult' , 'moves excess')


def __snake_case ( root ):
    if root is None:
        return 0

    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The number of nodes should be the same as the number of coins' )

    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )

        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves , distrib_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
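# Worked example for the distribution routine above (hypothetical tree, using the
# dataclass fields data/left/right): a root holding 3 coins with two empty children
# needs one move per child, so the expected answer is 2.
#
#   root = lowercase__(3, lowercase__(0), lowercase__(0))
#   assert __snake_case(root) == 2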
| 212 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('env' )
    else:
        parser = argparse.ArgumentParser('Accelerate env command' )

    parser.add_argument(
        '--config_file' , default=None , help='The config file to use for the default values in the launching script.' )

    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()

    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': f"{pt_version} ({pt_cuda_available})",
        'PyTorch XPU available': str(pt_xpu_available ),
        'PyTorch NPU available': str(pt_npu_available ),
        'System RAM': f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()

    print('\nCopy-and-paste the text below in your GitHub issue\n' )
    print('\n'.join([f"- {prop}: {val}" for prop, val in info.items()] ) )

    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' )
    accelerate_config_str = (
        '\n'.join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str )

    info['`Accelerate` configs'] = accelerate_config

    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 683 | 0 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors( n: int ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n


def solution( n: int = 10000 ) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
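# Worked example for the helpers above: sum_of_divisors(220) == 284 and
# sum_of_divisors(284) == 220, so 220 and 284 form an amicable pair and both are
# counted by solution(); the full run solution(10000) evaluates to 31626.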
| 370 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
def get_config( model_name ) -> BitConfig:
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = 'std_conv' if 'bit' in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )

    return config
def rename_key( name ) -> str:
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "head.fc" in name:
        name = name.replace('head.fc' , 'classifier.1' )
    if name.startswith('norm' ):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_config(model_name )

    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if 'head' in key else val

    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )

    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms

    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits

    print('Logits:' , logits[0, :3] )
    print('Predicted class:' , model.config.id2label[logits.argmax(-1 ).item()] )

    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub" )
        model.push_to_hub(f"ybelkada/{model_name}" )
        processor.push_to_hub(f"ybelkada/{model_name}" )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
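# Typical invocation (a sketch; assumes this script is saved as
# convert_bit_to_pytorch.py and that timm can download the default checkpoint):
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-dump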
| 683 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (adapted from fairseq)."""

    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )

    def __eq__( self , other ):
        return self.indices == other.indices

    def __getitem__( self , idx ):
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word

    def __len__( self ):
        return len(self.symbols )

    def __contains__( self , sym ):
        return sym in self.indices

    @classmethod
    def load( cls , f ):
        """Loads the dictionary from a text file with one `<symbol> <count>` pair per line."""
        d = cls()
        d.add_from_file(f )
        return d

    def add_symbol( self , word , n=1 , overwrite=False ):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx

    def _load_meta( self , lines ):
        return 0

    def add_from_file( self , f ):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f , str ):
            try:
                with open(f , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f ) )
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines )

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys( d ):
    """simple docstring"""
    # (1) remove the word-breaking symbol, (2) add the word-ending symbol where the word is not broken up
    d2 = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
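# Illustration of the key rewriting above (hypothetical fairseq vocab entries):
#   {'le@@': 5, 'tt@@': 6, 'er': 7}  ->  {'le': 5, 'tt': 6, 'er</w>': 7}
# i.e. the '@@' continuation marker is dropped and '</w>' marks word-final pieces,
# while the special tokens <s> <pad> </s> <unk> keep their original form.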
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f"Writing results to {pytorch_dump_folder_path}" )

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , 'checkpoint.pt' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!" )
    chkpt = torch.load(checkpoint_file , map_location='cpu' )

    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , 'dict.txt' )
    if not os.path.isfile(dict_file ):
        raise ValueError(f"path to the file {dict_file} does not exist!" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['vocab_file'] )
    print(f"Generating {src_vocab_file} of {src_vocab_size} records" )
    with open(src_vocab_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , 'bpecodes' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!" )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['merges_file'] )
    shutil.copyfile(bpecodes_file , merges_file )

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , 'config.json' )
    model_conf = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-1_2,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}" )
    with open(biogpt_model_config_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 10_24,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__A , ensure_ascii=__A , indent=__A ) )
# model
a_ : Union[str, Any] = chkpt['''model''']
# remove unneeded keys
a_ : List[str] = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(__A , __A )
a_ : List[Any] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
a_ : List[str] = model_state_dict.pop(__A )
else:
a_ : List[str] = model_state_dict.pop(__A )
a_ : Any = BioGptConfig.from_pretrained(__A )
a_ : List[str] = BioGptForCausalLM(__A )
# check that it loads ok
model_new.load_state_dict(__A )
# save
a_ : int = os.path.join(__A , __A )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__A , __A )
print('Conversion is done!' )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase_ : List[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 570 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number( number ) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
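# Worked example of the two chains (illustrative): 44 -> 32 -> 13 -> 10 -> 1 -> 1
# sticks at 1, while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops
# back to 89, so every starting number ends at exactly one of {1, 89}.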
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89
def chain( number ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain

    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution( number = 10_00_00_00 ) -> int:
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )

    # False marks chains that arrive at 89, which is what the problem counts
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator )}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )

        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator ).prev_sample

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
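# Minimal usage sketch (the checkpoint id below is an assumption for illustration,
# not something this file references):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]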
| 684 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def alpha_sigma_to_t( alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2


def get_crash_schedule( t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
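# Sanity check of the schedule helpers above: a clean sample (alpha=1, sigma=0)
# maps to t = atan2(0, 1) / pi * 2 = 0, while pure noise (alpha=0, sigma=1) maps
# to t = atan2(1, 0) / pi * 2 = 1, so timesteps live on [0, 1].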
class Object(object ):
    pass


class DiffusionUncond(nn.Module ):
    def __init__( self , global_args ):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download( model_name ):
    url = MODELS_MAP[model_name]["url"]
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming( name ):
    if name.startswith("skip" ):
        return name.replace("skip" , RES_CONV_MAP["skip"] )

    # name has to be of format main.{digit}
    if not name.startswith("main." ):
        raise ValueError(F'''ResConvBlock error with {name}''' )

    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming( name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )
def rename( input_string , max_depth=13 ):
    string = input_string

    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )

    depth = 0
    if string.startswith("net.3." ):
        depth += 1
        string = string[6:]
    elif string.startswith("net." ):
        string = string[4:]

    while string.startswith("main.7." ):
        depth += 1
        string = string[7:]

    if string.startswith("main." ):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F'''down_blocks.{depth}'''
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F'''up_blocks.{max_depth - 1}''' if int(layer_num ) > 3 else "down_blocks.0"

    if not string_left.startswith("." ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left

    if not isinstance(string_left , list ):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights( state_dict ):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , new_k , v ):
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main( args ):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )

    model_name = args.model_path.split("/" )[-1].split("." )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        args.model_path = download(model_name )

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )["state_dict"] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )

    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )

    assert len(renamed_minus_diffusers ) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith("kernel" ) for k in list(diffusers_minus_renamed ) ), F'''Problem with {diffusers_minus_renamed}'''

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict )

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )

    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )

    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )

    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )

    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios

    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path )

    print("Diff sum" , diff_sum )
    print("Diff max" , diff_max )

    assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''

    print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 1 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers( remaining_length , remainder , digits , length ) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result

    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result


def solution( max_power = 9 ) -> int:
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig( PretrainedConfig ):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__( self , vocab_size=30_522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 684 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ):
    return getitem, k


def _set( k , v ):
    return setitem, k, v


def _del( k ):
    return delitem, k


def _run_operation( obj , fun , *args ):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
_overwrite_items = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
_delete_items = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
_access_absent_items = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict( operations ):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )


def test_no_new_methods_was_added_to_api():
    def is_public(name ) -> bool:
        return not name.startswith("_" )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}

    assert dict_public_names > hash_public_names
| 684 | 1 |
def heaps( arr ) -> list:
    if len(arr ) <= 1:
        return [tuple(arr )]

    res = []

    def generate(k , arr ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return

        generate(k - 1 , arr )

        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i] , arr[k - 1] = arr[k - 1] , arr[i]
            else:  # k is odd
                arr[0] , arr[k - 1] = arr[k - 1] , arr[0]
            generate(k - 1 , arr )

    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(heaps(arr))
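# Quick check of Heap's algorithm above: heaps([1, 2, 3]) yields all 3! = 6
# permutations, e.g. (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1).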
| 684 |
def binary_insertion_sort( collection ) -> list:
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
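# Example run of the sort above: binary_insertion_sort([5, 2, 4, 6, 1, 3]) returns
# [1, 2, 3, 4, 5, 6]; the binary search only reduces comparisons -- the element
# shifts keep the worst case at O(n^2).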
| 684 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PIL.Image.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , rescale_factor: Union[int, float] = 1 / 255 , do_rescale: bool = True , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PIL.Image.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return resize(
            image , size=(size["height"], size["width"]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase : Optional[int] , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name="crop_size" )
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 684 |
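# A minimal numpy-level sketch of the pipeline the processor above applies, in
# order: resize -> center crop -> rescale -> normalize. The helper name and the
# default mean/std here are illustrative, not the transformers internals.
import numpy as np

def preprocess_one(image: np.ndarray, mean: float = 0.5, std: float = 0.5) -> np.ndarray:
    image = image.astype(np.float32) * (1 / 255)  # rescale [0, 255] -> [0, 1]
    return (image - mean) / std  # normalize around the dataset statistics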
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 684 | 1 |
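# A minimal worked illustration of the O(1) rolling-hash update used above,
# mirroring the alphabet_size/modulus constants from the snippet (the helper
# name `roll` is ours, not part of the original): sliding the window right
# drops the leading character and appends a trailing one without rehashing.
def roll(window_hash: int, old_char: str, new_char: str, power: int) -> int:
    # power must equal alphabet_size ** (window_len - 1) % modulus
    return ((window_hash - ord(old_char) * power) * alphabet_size + ord(new_char)) % modulus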
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
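# Sketch of the lazy-import pattern the module above relies on (class and
# attribute names here are illustrative, not the transformers internals): the
# first attribute access triggers the real import, which is then cached.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attrs]} into {attr: submodule}
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value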
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
| 684 | 1 |
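# For reference, floats_list((2, 3)) above yields a 2x3 nested list of random
# floats in [0, scale), e.g. [[0.84, 0.76, 0.42], [0.26, 0.49, 0.68]]; the
# increasing-length variant exercises the padding and truncation paths tested
# further down.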
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(F'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(F'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(F'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''')
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''')
        gamma_2 = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''')
        state_dict[F'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[F'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[Any]:
_UpperCAmelCase = False if "rvlcdip" in checkpoint_url else True
_UpperCAmelCase = BeitConfig(use_absolute_position_embeddings=_lowerCAmelCase , use_mask_token=_lowerCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
_UpperCAmelCase = 1_024
_UpperCAmelCase = 4_096
_UpperCAmelCase = 24
_UpperCAmelCase = 16
# labels
if "rvlcdip" in checkpoint_url:
_UpperCAmelCase = 16
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = "rvlcdip-id2label.json"
_UpperCAmelCase = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location="cpu" )["model"]
_UpperCAmelCase = create_rename_keys(_lowerCAmelCase , has_lm_head=_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , has_lm_head=_lowerCAmelCase )
# load HuggingFace model
_UpperCAmelCase = BeitForMaskedImageModeling(_lowerCAmelCase ) if has_lm_head else BeitForImageClassification(_lowerCAmelCase )
model.eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image
_UpperCAmelCase = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_lowerCAmelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
_UpperCAmelCase = encoding["pixel_values"]
_UpperCAmelCase = model(_lowerCAmelCase )
_UpperCAmelCase = outputs.logits
# verify logits
_UpperCAmelCase = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
assert logits.shape == torch.Size(_lowerCAmelCase ), "Shape of logits not as expected"
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
if has_lm_head:
_UpperCAmelCase = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
_UpperCAmelCase = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 684 |
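# The DiT/BEiT conversion above is largely a key-renaming pass over the source
# state dict; the generic pattern (helper name is ours, not the original) is:
def apply_renames(state_dict: dict, rename_keys: list) -> dict:
    for old, new in rename_keys:
        state_dict[new] = state_dict.pop(old)  # move each tensor under its HF name
    return state_dict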
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "
DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 684 | 1 |
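# The whitespace heuristic above distinguishes literal prompts from Hub repo
# ids; a quick illustration:
import re

assert re.search("\\s", "Answer the following question") is not None  # literal prompt
assert re.search("\\s", "huggingface-tools/default-prompts") is None  # repo id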
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 684 |
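# Quick usage check for patience_sort above: each Stack holds a non-increasing
# pile, bisect_left picks the leftmost pile whose top can take the new element,
# and heapq.merge interleaves the reversed (ascending) piles.
assert patience_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]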
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 1 |
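# Sanity check with the known Project Euler 43 member 1406357289: 406 % 2 == 0,
# 063 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0, 572 % 11 == 0, 728 % 13 == 0 and
# 289 % 17 == 0, so the predicate above accepts it.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))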
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F'''{script_name}.py'''
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
| 684 |
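# Sketch of how a test would consume the fixtures above (the test body and the
# load_dataset call are illustrative, not part of the original file):
# def test_dummy_dataset(dataset_loading_script_dir):
#     ds = datasets.load_dataset(dataset_loading_script_dir)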
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 | 1 |
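# Blenderbot convention encoded above: the special-token builder only appends
# the EOS id (a single sequence [5, 6, 7] becomes [5, 6, 7, eos_token_id]), and
# user turns in a conversation are prefixed with a space before encoding.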
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
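# The identity used above: tanh(x) = 2 / (1 + exp(-2x)) - 1. Quick checks
# against numpy's reference implementation:
assert abs(tangent_hyperbolic(np.array([0.0]))[0]) < 1e-12  # tanh(0) == 0
assert abs(tangent_hyperbolic(np.array([1.0]))[0] - np.tanh(1.0)) < 1e-12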
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 | 1 |
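# Pattern used by the converters above: tensors from the s3prl "Downstream"
# checkpoint are copied into named parameters of the matching HF head model;
# schematically (the attribute names on the left are illustrative):
# model.projector.weight.data = downstream_dict["projector.weight"]
# model.projector.bias.data = downstream_dict["projector.bias"]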
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : int=None , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = parent
_UpperCAmelCase = config_class
_UpperCAmelCase = has_text_modality
_UpperCAmelCase = kwargs
_UpperCAmelCase = common_properties
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.config_class(**self.inputs_dict )
_UpperCAmelCase = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) , msg=F'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(__UpperCamelCase ):
try:
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.parent.assertEqual(
getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , msg=F'''`{name} value {idx} expected, but was {getattr(__UpperCamelCase , __UpperCamelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(__UpperCamelCase ):
try:
_UpperCAmelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , msg=F'''`{name} value {idx} expected, but was {getattr(__UpperCamelCase , __UpperCamelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.config_class(**self.inputs_dict )
_UpperCAmelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "config.json" )
config_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.config_class.from_json_file(__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = self.config_class.from_pretrained(__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.config_class(**self.inputs_dict )
_UpperCAmelCase = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
config_first.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = self.config_class.from_pretrained(__UpperCamelCase , subfolder=__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_UpperCAmelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def UpperCAmelCase__ ( self : int ):
if self.config_class.is_composition:
return
_UpperCAmelCase = self.config_class()
self.parent.assertIsNotNone(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = copy.deepcopy(__UpperCamelCase )
_UpperCAmelCase = self.config_class(**__UpperCamelCase )
_UpperCAmelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(__UpperCamelCase , __UpperCamelCase ) != value:
wrong_values.append((key, getattr(__UpperCamelCase , __UpperCamelCase ), value) )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = "\n".join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def UpperCAmelCase__ ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 684 |
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
| 684 | 1 |
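# Quick checks for is_balanced above:
assert is_balanced("{[()]}")
assert not is_balanced("(]")
assert not is_balanced("(((")  # unclosed brackets leave the stack non-empty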
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCAmelCase = 1_6
__lowerCAmelCase = 3_2
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase = 16 , _lowerCAmelCase = "bert-base-cased" ) -> List[str]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = load_dataset("glue" , "mrpc" )
def tokenize_function(_lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase = datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=_lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(_lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
_UpperCAmelCase = DataLoader(
tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase )
return train_dataloader, eval_dataloader
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
model.eval()
_UpperCAmelCase = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**_lowerCAmelCase )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_UpperCAmelCase , _UpperCAmelCase = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowerCAmelCase ) - 1:
_UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
_UpperCAmelCase = metric.compute()
return eval_metric["accuracy"]
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
# Initialize accelerator
_UpperCAmelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["lr"]
_UpperCAmelCase = int(config["num_epochs"] )
_UpperCAmelCase = int(config["seed"] )
_UpperCAmelCase = int(config["batch_size"] )
_UpperCAmelCase = args.model_name_or_path
set_seed(_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase = 1
_UpperCAmelCase = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , )
else:
_UpperCAmelCase = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase = 0
# We also need to keep track of the stating epoch so files are named properly
_UpperCAmelCase = 0
_UpperCAmelCase = evaluate.load("glue" , "mrpc" )
_UpperCAmelCase = num_epochs
if args.partial_train_epoch is not None:
_UpperCAmelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_UpperCAmelCase = args.resume_from_checkpoint.split("epoch_" )[1]
_UpperCAmelCase = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_UpperCAmelCase = int(_lowerCAmelCase ) + 1
_UpperCAmelCase = evaluation_loop(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
accelerator.print("resumed checkpoint performance:" , _lowerCAmelCase )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , "r" ) as f:
_UpperCAmelCase = json.load(_lowerCAmelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_UpperCAmelCase = {}
for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
_UpperCAmelCase = model(**_lowerCAmelCase )
_UpperCAmelCase = outputs.loss
_UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_UpperCAmelCase = F'''epoch_{epoch}'''
_UpperCAmelCase = os.path.join(args.output_dir , _lowerCAmelCase )
accelerator.save_state(_lowerCAmelCase )
_UpperCAmelCase = evaluation_loop(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = accuracy
_UpperCAmelCase = lr_scheduler.get_lr()[0]
_UpperCAmelCase = optimizer.param_groups[0]["lr"]
_UpperCAmelCase = epoch
_UpperCAmelCase = overall_step
accelerator.print(F'''epoch {epoch}:''' , _lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , "w" ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCamelCase ( ) -> Dict:
_UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=_lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=_lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=_lowerCAmelCase , default=2 , help="Number of train epochs." , )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
| 684 |
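# The resume logic above assumes checkpoints are written as output_dir/epoch_{n}
# by accelerator.save_state; on restart the epoch index is parsed back out of
# the folder name, training resumes at epoch n + 1, and the stored accuracy and
# learning rates are re-checked against the state_{n}.json sidecar file.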
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 684 | 1 |
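# Worked example for the solver above: 2x + 3y = 6 and 4x + 9y = 15 give
# determinant = 2*9 - 4*3 = 6, determinant_x = 6*9 - 15*3 = 9 and
# determinant_y = 2*15 - 4*6 = 6, hence x = 1.5 and y = 1.0.
assert cramers_rule_2x2([2, 3, 6], [4, 9, 15]) == (1.5, 1.0)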
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[Any] = """"""
__SCREAMING_SNAKE_CASE : Optional[int] = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self : List[str] , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Tuple , ):
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def UpperCAmelCase__ ( self : List[Any] ):
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"name": str(__UpperCamelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ):
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Dict , **__UpperCamelCase : List[str] ):
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any]=False , **__UpperCamelCase : List[Any] ):
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("/" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("/" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
# Initialise PyTorch model
_UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
_UpperCAmelCase = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
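# Example invocation (hedged: the script filename and the local paths are
# placeholders; the flags come from the parser above):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin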
| 684 | 1 |
__lowerCAmelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__lowerCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__lowerCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 684 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
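# Outside the test harness, the same pipeline is driven like this (a sketch
# assembled from the slow test above; model id, image path and expected scores
# are taken verbatim from it):
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)
#   # -> [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]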
| 684 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Tuple = XLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(__UpperCamelCase ) )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = "lower newer"
_UpperCAmelCase = "lower newer"
return input_text, output_text
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = XLMTokenizer(self.vocab_file , self.merges_file )
_UpperCAmelCase = "lower"
_UpperCAmelCase = ["low", "er</w>"]
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokens + ["<unk>"]
_UpperCAmelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
@slow
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
_UpperCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class __SCREAMING_SNAKE_CASE ( lowercase):
def __init__( self : Optional[int] , __UpperCamelCase : List[str]="" , __UpperCamelCase : Optional[Any]="train" ):
assert os.path.isdir(__UpperCamelCase )
_UpperCAmelCase = []
_UpperCAmelCase = os.listdir(__UpperCamelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
_UpperCAmelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
if not os.path.isfile(__UpperCamelCase ):
continue
self.documents.append(__UpperCamelCase )
def __len__( self : int ):
return len(self.documents )
def __getitem__( self : Optional[int] , __UpperCamelCase : Dict ):
_UpperCAmelCase = self.documents[idx]
_UpperCAmelCase = document_path.split("/" )[-1]
with open(__UpperCamelCase , encoding="utf-8" ) as source:
_UpperCAmelCase = source.read()
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
return document_name, story_lines, summary_lines
def __lowerCamelCase ( _lowerCAmelCase ) -> List[Any]:
_UpperCAmelCase = list(filter(lambda _lowerCAmelCase : len(_lowerCAmelCase ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
_UpperCAmelCase = [_add_missing_period(_lowerCAmelCase ) for line in nonempty_lines]
# gather article lines
_UpperCAmelCase = []
_UpperCAmelCase = deque(_lowerCAmelCase )
while True:
try:
_UpperCAmelCase = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(_lowerCAmelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
_UpperCAmelCase = list(filter(lambda _lowerCAmelCase : not t.startswith("@highlight" ) , _lowerCAmelCase ) )
return story_lines, summary_lines
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if len(_lowerCAmelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_lowerCAmelCase )) )
return sequence
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_UpperCAmelCase = torch.ones_like(_lowerCAmelCase )
_UpperCAmelCase = sequence == pad_token_id
_UpperCAmelCase = 0
return mask
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = [tokenizer.encode(_lowerCAmelCase ) for line in story_lines]
_UpperCAmelCase = [token for sentence in story_lines_token_ids for token in sentence]
_UpperCAmelCase = [tokenizer.encode(_lowerCAmelCase ) for line in summary_lines]
_UpperCAmelCase = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = []
for sequence in batch:
_UpperCAmelCase = -1
_UpperCAmelCase = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_lowerCAmelCase )
return torch.tensor(_lowerCAmelCase )
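# Hedged mini-demo using the de-obfuscated helper names that the companion test
# file further below imports for these functions (truncate_or_pad and build_mask):
#   truncate_or_pad([1, 2, 3], 5, 0)              # -> [1, 2, 3, 0, 0]
#   build_mask(torch.tensor([1, 2, 3, 0, 0]), 0)  # -> tensor([1, 1, 1, 0, 0])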
| 684 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,)
__SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ):
_UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__UpperCamelCase )
return config
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase , _UpperCAmelCase = sample, sample
for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
_UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ):
if scheduler is None:
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = dict(self.forward_default_kwargs )
_UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = self.dummy_sample
_UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
_UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10]
_UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
_UpperCAmelCase = scheduler.timesteps[5]
_UpperCAmelCase = scheduler.timesteps[6]
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
_UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
_UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
_UpperCAmelCase = self.full_loop(
solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , )
assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[int] ):
self.check_over_configs(lower_order_final=__UpperCamelCase )
self.check_over_configs(lower_order_final=__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.full_loop()
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ):
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
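# A stand-alone sketch of the denoising loop these tests exercise (the denoiser
# `model` and the sample shape are placeholders, not part of the test suite):
#   scheduler = UniPCMultistepScheduler(num_train_timesteps=1_000, solver_order=2, solver_type="bh2")
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample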
| 684 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str]=13 , __UpperCamelCase : Dict=7 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : Union[str, Any]=99 , __UpperCamelCase : str=32 , __UpperCamelCase : int=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Dict=512 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : int=0.02 , __UpperCamelCase : Tuple=3 , __UpperCamelCase : Any=4 , __UpperCamelCase : Any=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Tuple ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any ):
_UpperCAmelCase = NystromformerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = NystromformerForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : str ):
_UpperCAmelCase = NystromformerForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NystromformerForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = NystromformerForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
            _UpperCAmelCase,
        ) = config_and_inputs
_UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Optional[Any] = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Tuple = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Tuple = False
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = NystromformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def UpperCAmelCase__ ( self : str ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = NystromformerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = "the [MASK] of Belgium is Brussels"
_UpperCAmelCase = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="pt" )
with torch.no_grad():
_UpperCAmelCase = model(encoding.input_ids ).logits
_UpperCAmelCase = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__UpperCamelCase ) , "capital" )
| 684 |
import math
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1
_UpperCAmelCase = n
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # adjacency matrix for weight
_UpperCAmelCase = [
[math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
    def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
        # add_edge(u, v, w) in the un-obfuscated source: records the weight of edge u -> v
        _UpperCAmelCase = w  # i.e. self.dp[u][v] = w
    def UpperCAmelCase__ ( self : Dict ):
        # Floyd-Warshall relaxation: for every intermediate node k, keep the shorter
        # of the current i -> j distance and the i -> k -> j detour (self.dp[i][j] = ...)
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    _UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ):
return self.dp[u][v]
if __name__ == "__main__":
__lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 684 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__lowerCAmelCase = _symbol_database.Default()
__lowerCAmelCase = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
__lowerCAmelCase = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
__lowerCAmelCase = None
__lowerCAmelCase = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
__lowerCAmelCase = 4_5
__lowerCAmelCase = 1_5_8_1
__lowerCAmelCase = 1_5_1_7
__lowerCAmelCase = 1_5_7_0
__lowerCAmelCase = 1_5_8_4
__lowerCAmelCase = 1_7_9_3
__lowerCAmelCase = 1_7_9_5
__lowerCAmelCase = 1_9_1_6
__lowerCAmelCase = 1_8_6_4
__lowerCAmelCase = 1_9_0_5
__lowerCAmelCase = 1_9_1_9
__lowerCAmelCase = 2_4_2_9
__lowerCAmelCase = 2_2_0_8
__lowerCAmelCase = 2_4_1_8
__lowerCAmelCase = 2_3_2_3
__lowerCAmelCase = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 684 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Dict = VQModel
__SCREAMING_SNAKE_CASE : Optional[int] = """sample"""
@property
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ):
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def UpperCAmelCase__ ( self : Tuple ):
return (3, 32, 32)
@property
def UpperCAmelCase__ ( self : str ):
return (3, 32, 32)
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
pass
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(__UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
| 684 | 1 |
from __future__ import annotations
from typing import Any
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
create_state_space_tree(_lowerCAmelCase , [] , 0 )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
    # base case: an include/exclude decision has been made for every element
    if index == len(_lowerCAmelCase ):
        print(_lowerCAmelCase )
        return
    # branch 1: exclude sequence[index] and recurse
    create_state_space_tree(_lowerCAmelCase , _lowerCAmelCase , index + 1 )
    # branch 2: include it, recurse, then backtrack
    current_subsequence.append(sequence[index] )
    create_state_space_tree(_lowerCAmelCase , _lowerCAmelCase , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
__lowerCAmelCase = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 684 |
import requests
__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( _lowerCAmelCase ) -> None:
# fetching a list of articles in json format
_UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 684 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __lowerCamelCase ( ) -> Optional[int]:
_UpperCAmelCase = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=_lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=_lowerCAmelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=_lowerCAmelCase )
return parser.parse_args()
def __lowerCamelCase ( ) -> Optional[int]:
_UpperCAmelCase = parse_args()
# Import training_script as a module.
_UpperCAmelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase = script_fpath.stem
_UpperCAmelCase = importlib.import_module(_lowerCAmelCase )
# Patch sys.argv
_UpperCAmelCase = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
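# Example launch (hedged: the training script name and its flags are placeholders;
# --num_cores and the positional script argument come from the parser above):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...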
| 684 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = 10
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = [1, 2, 3, 4]
_UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
_UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , [] )
self.assertEqual(__UpperCamelCase , [] )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
_UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
_UpperCAmelCase = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = ["It was the best of times."]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
_UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = 101
_UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
_UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
_UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
| 684 | 1 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__lowerCAmelCase = "\nimport os\n"
__lowerCAmelCase = "\ndef foo():\n import os\n return False\n"
__lowerCAmelCase = "\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
__lowerCAmelCase = "\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
__lowerCAmelCase = "\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
__lowerCAmelCase = "\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
__lowerCAmelCase = "\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
__lowerCAmelCase = "\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
__lowerCAmelCase = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
__lowerCAmelCase = "\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
__lowerCAmelCase = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , _lowerCAmelCase )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
_UpperCAmelCase = os.path.join(_lowerCAmelCase , "test_file.py" )
with open(_lowerCAmelCase , "w" ) as _tmp_file:
_tmp_file.write(_lowerCAmelCase )
_UpperCAmelCase = get_imports(_lowerCAmelCase )
assert parsed_imports == ["os"]
| 684 |
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
_UpperCAmelCase = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCAmelCase = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["DPTFeatureExtractor"]
__lowerCAmelCase = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 684 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = {}
_UpperCAmelCase = job["started_at"]
_UpperCAmelCase = job["completed_at"]
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
_UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
_UpperCAmelCase = start
_UpperCAmelCase = end
_UpperCAmelCase = duration_in_min
return job_info
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
_UpperCAmelCase = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
_UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = get_job_time(args.workflow_run_id)
__lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
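# Sample invocation (hedged; the script filename and the run id are placeholders):
#   python get_github_job_time.py --workflow_run_id 2666283757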
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
assert x is not None
assert y is not None
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
# declaring the array for storing the dp values
_UpperCAmelCase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            # match is 1 when the current characters of x and y agree, else 0
            _UpperCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
            # LCS recurrence: best of dropping a char from x, from y, or extending the diagonal
            _UpperCAmelCase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
_UpperCAmelCase = ""
_UpperCAmelCase , _UpperCAmelCase = m, n
while i > 0 and j > 0:
_UpperCAmelCase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
_UpperCAmelCase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
__lowerCAmelCase = "AGGTAB"
__lowerCAmelCase = "GXTXAYB"
__lowerCAmelCase = 4
__lowerCAmelCase = "GTAB"
__lowerCAmelCase , __lowerCAmelCase = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
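# Worked check for the recurrence above (a sketch, not in the original file):
# for "AGGTAB" vs "GXTXAYB" the table's bottom-right cell is 4 and walking the
# diagonal matches back recovers "GTAB".
assert longest_common_subsequence("AGGTAB", "GXTXAYB") == (4, "GTAB")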
| 684 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCAmelCase = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
_UpperCAmelCase = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase )
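# Worked example of the schedule math above (a sketch): at t = 0.5,
# sigma = sin(pi/4)**2 = 0.5 and alpha = (1 - 0.5**2)**0.5 ≈ 0.866, so
# atan2(0.5, 0.866) = pi/6 and the rescaled timestep is (pi/6) / pi * 2 = 1/3.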
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
class __SCREAMING_SNAKE_CASE ( nn.Module):
def __init__( self : str , __UpperCamelCase : Optional[int] ):
super().__init__()
_UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
_UpperCAmelCase = deepcopy(self.diffusion )
_UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = MODELS_MAP[model_name]["url"]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__lowerCAmelCase = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__lowerCAmelCase = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__lowerCAmelCase = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__lowerCAmelCase = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__lowerCAmelCase = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return name.replace(_lowerCAmelCase , _lowerCAmelCase )
elif name.startswith(_lowerCAmelCase ):
return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]:
_UpperCAmelCase = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
_UpperCAmelCase = 0
if string.startswith("net.3." ):
depth += 1
_UpperCAmelCase = string[6:]
elif string.startswith("net." ):
_UpperCAmelCase = string[4:]
while string.startswith("main.7." ):
depth += 1
_UpperCAmelCase = string[7:]
if string.startswith("main." ):
_UpperCAmelCase = string[5:]
# mid block
if string[:2].isdigit():
_UpperCAmelCase = string[:2]
_UpperCAmelCase = string[2:]
else:
_UpperCAmelCase = string[0]
_UpperCAmelCase = string[1:]
if depth == max_depth:
_UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = "mid_block"
elif depth > 0 and int(_lowerCAmelCase ) < 7:
_UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''down_blocks.{depth}'''
elif depth > 0 and int(_lowerCAmelCase ) > 7:
_UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
_UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
_UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
_UpperCAmelCase = string_left[1:]
if "resnets" in new_layer:
_UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
elif "attentions" in new_layer:
_UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
_UpperCAmelCase = new_string_left
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = prefix + "." + new_layer + "." + string_left
else:
_UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
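# Example of the mapping above (a sketch worked out from the tables, not an
# official reference): a checkpoint key like "net.3.main.7.main.1.main.0.weight"
# reaches depth 2, hits DOWN_NUM_TO_LAYER["1"], and comes out as
# "down_blocks.2.resnets.0.conv_1.weight".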
def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
_UpperCAmelCase = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers don't have trainable weights
continue
_UpperCAmelCase = rename(_lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
_UpperCAmelCase = v
return new_state_dict
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
if len(_lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
_UpperCAmelCase = v[:, :, 0]
else:
# bias
_UpperCAmelCase = v
else:
# qkv matrices
_UpperCAmelCase = v.shape[0]
_UpperCAmelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
_UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
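# Shape sketch for the split above: a fused qkv conv weight of shape
# (3*d, c, 1) is sliced into three (d, c) linear weights (query, key, value);
# fused biases of shape (3*d,) are split the same way.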
def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple:
_UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
_UpperCAmelCase = download(_lowerCAmelCase )
_UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
_UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
_UpperCAmelCase = Object()
_UpperCAmelCase = sample_size
_UpperCAmelCase = sample_rate
_UpperCAmelCase = 0
_UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
_UpperCAmelCase = diffusers_model.state_dict()
_UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
_UpperCAmelCase = orig_model.diffusion_ema.eval()
_UpperCAmelCase = orig_model.state_dict()
_UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
_UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
_UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
_UpperCAmelCase = value.squeeze()
_UpperCAmelCase = value
diffusers_model.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase = 100
_UpperCAmelCase = 33
_UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
_UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
_UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
_UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
_UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(33 )
_UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
_UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
_UpperCAmelCase = generated.clamp(-1 , 1 )
_UpperCAmelCase = (generated - audio).abs().sum()
_UpperCAmelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _lowerCAmelCase )
print("Diff max" , _lowerCAmelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__lowerCAmelCase = parser.parse_args()
main(args)
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase ) -> int:
_UpperCAmelCase = [1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0, 0, 0
_UpperCAmelCase = ugly_nums[ia] * 2
_UpperCAmelCase = ugly_nums[ia] * 3
_UpperCAmelCase = ugly_nums[ia] * 5
for _ in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = min(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
ugly_nums.append(_lowerCAmelCase )
if next_num == next_a:
ia += 1
_UpperCAmelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
_UpperCAmelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
_UpperCAmelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(2_0_0) = }''')
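# Quick sanity sketch (not in the original file): the first ten ugly numbers
# are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the tenth is 12.
assert ugly_numbers(10) == 12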
| 684 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__lowerCAmelCase = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ):
# A mock response for an HTTP head request to emulate server down
_UpperCAmelCase = mock.Mock()
_UpperCAmelCase = 500
_UpperCAmelCase = {}
_UpperCAmelCase = HTTPError
_UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
_UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ ( self : List[Any] ):
# This test is for deprecated behavior and can be removed in v5
_UpperCAmelCase = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def UpperCAmelCase__ ( self : Dict ):
with self.assertRaises(__UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(__UpperCamelCase )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : str ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
def UpperCAmelCase__ ( self : int ):
CustomImageProcessor.register_for_auto_class()
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 684 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : Any = ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = (3, 32, 128)
_UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
_UpperCAmelCase = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
_UpperCAmelCase = os.path.join(self.tmpdirname , __UpperCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] , **__UpperCamelCase : List[Any] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] , **__UpperCamelCase : List[Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_UpperCAmelCase = Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )
_UpperCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors="np" )
_UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = "test"
_UpperCAmelCase = processor(text=__UpperCamelCase )
_UpperCAmelCase = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = "test"
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.char_decode(__UpperCamelCase )
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
_UpperCAmelCase = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = None
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase__ ( self : int ):
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = torch.randn(1 , 27 , 38 )
_UpperCAmelCase = torch.randn(1 , 27 , 50_257 )
_UpperCAmelCase = torch.randn(1 , 27 , 30_522 )
_UpperCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 684 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
return getitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
return setitem, k, v
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
return delitem, k
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]:
try:
return fun(_lowerCAmelCase , *_lowerCAmelCase ), None
except Exception as e:
return None, e
__lowerCAmelCase = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = HashMap(initial_block_size=4 )
_UpperCAmelCase = {}
for _, (fun, *args) in enumerate(_lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase )
assert my_res == py_res
assert str(_lowerCAmelCase ) == str(_lowerCAmelCase )
assert set(_lowerCAmelCase ) == set(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
assert set(my.items() ) == set(py.items() )
def __lowerCamelCase ( ) -> List[Any]:
def is_public(_lowerCAmelCase ) -> bool:
return not name.startswith("_" )
_UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )}
_UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )}
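# `>` on sets is a strict-superset test: every public name exposed by HashMap
# must also exist on the built-in dict it mimics.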
assert dict_public_names > hash_public_names
| 684 | 1 |
def __lowerCamelCase ( _lowerCAmelCase = 600_851_475_143 ) -> int:
try:
_UpperCAmelCase = int(_lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_UpperCAmelCase = 2
_UpperCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_UpperCAmelCase = i
while n % i == 0:
_UpperCAmelCase = n // i
i += 1
return int(_lowerCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
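# Worked check (a sketch): 13195 = 5 * 7 * 13 * 29, so its largest prime
# factor is 29; the default input is Project Euler problem 3's 600851475143.
assert solution(13195) == 29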
| 684 |
def __lowerCamelCase ( _lowerCAmelCase ) -> list:
_UpperCAmelCase = len(_lowerCAmelCase )
for i in range(1 , _lowerCAmelCase ):
_UpperCAmelCase = collection[i]
_UpperCAmelCase = 0
_UpperCAmelCase = i - 1
while low <= high:
_UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ):
_UpperCAmelCase = collection[j - 1]
_UpperCAmelCase = val
return collection
if __name__ == "__main__":
__lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
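# Quick self-check (a sketch, not in the original file): the binary search
# only cuts comparisons to O(n log n); element shifting stays O(n^2).
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]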
| 684 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
__lowerCAmelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__lowerCAmelCase = "main"
# Default branch name
__lowerCAmelCase = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
__lowerCAmelCase = "aaaaaaa"
# This commit does not exist, so we should 404.
__lowerCAmelCase = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
__lowerCAmelCase = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Dict:
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def __lowerCamelCase ( ) -> int:
print("Bonjour!" )
yield
print("Au revoir!" )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Optional[Any] ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Tuple ):
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : str ):
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] ):
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["start_positions", "end_positions"] )
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
@require_tf
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels", "next_sentence_label"] )
self.assertEqual(find_labels(__UpperCamelCase ) , ["start_positions", "end_positions"] )
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
self.assertEqual(find_labels(__UpperCamelCase ) , ["labels"] )
@require_flax
def UpperCAmelCase__ ( self : Tuple ):
# Flax models don't have labels
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
class __SCREAMING_SNAKE_CASE ( lowercase):
pass
self.assertEqual(find_labels(__UpperCamelCase ) , [] )
| 684 |
__lowerCAmelCase = 2_5_6
# Modulus to hash a string
__lowerCAmelCase = 1_0_0_0_0_0_3
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool:
_UpperCAmelCase = len(_lowerCAmelCase )
_UpperCAmelCase = len(_lowerCAmelCase )
if p_len > t_len:
return False
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(_lowerCAmelCase ):
_UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_UpperCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
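# Rolling-hash sketch for the update above: dropping the leading character and
# appending the next one is O(1), e.g. with base 256 and modulus 1000003,
#   new_hash = ((old_hash - ord(text[i]) * base**(p_len - 1)) * base
#               + ord(text[i + p_len])) % modulus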
def __lowerCamelCase ( ) -> None:
_UpperCAmelCase = "abc1abc12"
_UpperCAmelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_UpperCAmelCase = "alskfjaldsk23adsfabcabc"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) and not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 2)
_UpperCAmelCase = "ABABX"
_UpperCAmelCase = "ABABZABABYABABX"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 3)
_UpperCAmelCase = "AAAB"
_UpperCAmelCase = "ABAAAAAB"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 4)
_UpperCAmelCase = "abcdabcy"
_UpperCAmelCase = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
# Test 5)
_UpperCAmelCase = "Lü"
_UpperCAmelCase = "Lüsai"
assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = "Lue"
assert not rabin_karp(_lowerCAmelCase , _lowerCAmelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
| 684 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __lowerCamelCase ( ) -> int:
_UpperCAmelCase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
_UpperCAmelCase = bs[:]
_UpperCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCAmelCase )
cs.append(2**8 + n )
n += 1
_UpperCAmelCase = [chr(_lowerCAmelCase ) for n in cs]
return dict(zip(_lowerCAmelCase , _lowerCAmelCase ) )
def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
return pairs
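# For example, get_pairs(("h", "e", "l", "l", "o")) returns the adjacent symbol
# pairs {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, which the BPE loop
# below repeatedly ranks and merges.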
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Dict = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : int="</s>" , __UpperCamelCase : Optional[int]="</s>" , __UpperCamelCase : str="<s>" , __UpperCamelCase : Tuple="<unk>" , __UpperCamelCase : int="<pad>" , __UpperCamelCase : Union[str, Any]="<mask>" , __UpperCamelCase : List[Any]=False , **__UpperCamelCase : Optional[Any] , ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else bos_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else sep_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else cls_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle:
_UpperCAmelCase = json.load(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = errors # how to handle errors in decoding
_UpperCAmelCase = bytes_to_unicode()
_UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCamelCase , encoding="utf-8" ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split("\n" )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = {}
_UpperCAmelCase = add_prefix_space
# re.IGNORECASE should have been added so BPE merges can happen for capitalized versions of contractions
_UpperCAmelCase = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Optional[int] ):
return len(self.encoder )
def UpperCAmelCase__ ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Optional[Any] ):
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
_UpperCAmelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__UpperCamelCase ):
try:
_UpperCAmelCase = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = new_word
if len(__UpperCamelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = word
return word
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str ):
_UpperCAmelCase = []
for token in re.findall(self.pat , __UpperCamelCase ):
_UpperCAmelCase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(" " ) )
return bpe_tokens
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] ):
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] ):
return self.decoder.get(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : List[Any] ):
_UpperCAmelCase = "".join(__UpperCamelCase )
_UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + "\n" )
_UpperCAmelCase = 0
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
_UpperCAmelCase = token_index
writer.write(" ".join(__UpperCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1]
def UpperCAmelCase__ ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : str=False , **__UpperCamelCase : Optional[int] ):
_UpperCAmelCase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()):
_UpperCAmelCase = " " + text
return (text, kwargs)
def UpperCAmelCase__ ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses already contain the space prefix.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCAmelCase = random.Random()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]:
if rng is None:
_UpperCAmelCase = global_rng
_UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = min_seq_length
_UpperCAmelCase = max_seq_length
_UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase = padding_value
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = return_attention_mask
_UpperCAmelCase = do_normalize
_UpperCAmelCase = feature_size
_UpperCAmelCase = chunk_length
_UpperCAmelCase = hop_length
def UpperCAmelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ):
def _flatten(__UpperCamelCase : Any ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
_UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = WhisperFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_UpperCAmelCase = feat_extract_first.to_dict()
_UpperCAmelCase = feat_extract_second.to_dict()
_UpperCAmelCase = feat_extract_first.mel_filters
_UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test batched
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase = np.asarray(__UpperCamelCase )
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
_UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import torch
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ):
_UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Tuple ):
# fmt: off
_UpperCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_UpperCAmelCase = self._load_datasamples(1 )
_UpperCAmelCase = WhisperFeatureExtractor()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase = self._load_datasamples(1 )[0]
_UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to reproduce the normalization issue
_UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
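# The helper is expected to normalize each sample to (x - mean(x)) / sqrt(var(x) + eps)
# (an assumption about zero_mean_unit_var_norm's definition), hence the
# near-zero mean and near-unit variance checked above.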
| 684 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__lowerCAmelCase = "src/transformers"
__lowerCAmelCase = "docs/source/en"
__lowerCAmelCase = "."
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
with open(_lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase = f.readlines()
# Find the start prompt.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_lowerCAmelCase ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
while not lines[end_index].startswith(_lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__lowerCAmelCase = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
def __lowerCamelCase ( _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , _lowerCAmelCase )
return [m.group(0 ) for m in matches]
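# e.g. camel_case_split("TFBertForSequenceClassification") yields
# ["TF", "Bert", "For", "Sequence", "Classification"].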
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
_UpperCAmelCase = 2 if text == "✅" or text == "❌" else len(_lowerCAmelCase )
_UpperCAmelCase = (width - text_length) // 2
_UpperCAmelCase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __lowerCamelCase ( ) -> int:
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("Tokenizer" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def __lowerCamelCase ( overwrite=False ) -> str:
    current_table , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , "index.md" ) , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 684 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: "
__lowerCAmelCase = "huggingface-tools/default-prompts"
__lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
_UpperCAmelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCAmelCase ) is not None:
return prompt_or_repo_id
_UpperCAmelCase = cached_file(
_lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f:
return f.read()
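# Hedged usage sketch (added; assumes the function above keeps its original
# name `download_prompt`): a bare repo ID is resolved through the Hub cache,
# while any string containing whitespace is treated as a literal prompt.
#
#   template = download_prompt(None, "my-agent", mode="run")       # default repo
#   literal = download_prompt("Summarize: <<task>>", "my-agent")   # returned as-is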
| 684 | 1 |
from math import sqrt
def __lowerCamelCase ( n ) -> int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
return total - n
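# Worked example (added): 220 and 284 form the classic amicable pair, i.e.
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both are
# counted by the solution below.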
def __lowerCamelCase ( limit = 10_000 ) -> int:
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 684 |
from itertools import permutations
def __lowerCamelCase ( num ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
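# Worked example (added): for the known solution member 1406357289 the digit
# tuple (1, 4, 0, 6, 3, 5, 7, 2, 8, 9) passes every test: 063 is divisible
# by 3, 635 by 5, 357 by 7, 572 by 11, 728 by 13 and 289 by 17.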
def __lowerCamelCase ( n = 10 ) -> int:
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 684 | 1 |
def __lowerCamelCase ( column_title ) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
answer += value
power += 1
index -= 1
return answer
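# Worked example (added): each letter is a base-26 digit with A == 1, so
# "AB" -> 1 * 26 + 2 == 28 and "ZZ" -> 26 * 26 + 26 == 702.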
if __name__ == "__main__":
from doctest import testmod
testmod()
| 684 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__lowerCAmelCase = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer
def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ):
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**__UpperCamelCase )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = "post_processor"
_UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["sep"] )
if "cls" in state:
_UpperCAmelCase = tuple(state["cls"] )
_UpperCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
_UpperCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
_UpperCAmelCase = value
def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ):
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__UpperCamelCase )
_UpperCAmelCase = " ".join(__UpperCamelCase )
_UpperCAmelCase = self.encode(__UpperCamelCase )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 684 | 1 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : str=8 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : Any=99 , __UpperCamelCase : Dict=16 , __UpperCamelCase : Optional[int]=5 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[Any]=36 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : str=512 , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : Any=2 , __UpperCamelCase : Optional[Any]=0.02 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : str=4 , __UpperCamelCase : Tuple=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : str ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = 300
return config
def UpperCAmelCase__ ( self : List[Any] ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ):
_UpperCAmelCase = MraModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , ):
_UpperCAmelCase = True
_UpperCAmelCase = MraModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any ):
_UpperCAmelCase = MraForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
_UpperCAmelCase = MraForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MraForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : str ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MraForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : int ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MraForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : int = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : int = ()
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = MraModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def UpperCAmelCase__ ( self : Any ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def UpperCAmelCase__ ( self : Tuple ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = MraModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason="MRA does not output attentions" )
def UpperCAmelCase__ ( self : Tuple ):
return
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@slow
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
_UpperCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
_UpperCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = 50_265
_UpperCAmelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
_UpperCAmelCase = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = 50_265
_UpperCAmelCase = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
| 684 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["projector.weight"]
_UpperCAmelCase = downstream_dict["projector.bias"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
_UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["model.linear.weight"]
_UpperCAmelCase = downstream_dict["model.linear.bias"]
return model
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
_UpperCAmelCase = downstream_dict["connector.weight"]
_UpperCAmelCase = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase = downstream_dict[
F'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
_UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
_UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase = checkpoint["Downstream"]
_UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase )
_UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_UpperCAmelCase = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
elif arch.endswith("ForXVector" ):
_UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCAmelCase )
hf_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
__lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowercase):
__SCREAMING_SNAKE_CASE : Optional[int] = """gptsan-japanese"""
__SCREAMING_SNAKE_CASE : Tuple = [
"""past_key_values""",
]
__SCREAMING_SNAKE_CASE : Dict = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__( self , vocab_size=36_000 , max_position_embeddings=1_280 , d_model=1_024 , d_ff=8_192 , d_ext=4_096 , d_spout=128 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=128 , dropout_rate=0.0 , layer_norm_epsilon=1e-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.002 , output_router_logits=False , use_cache=True , separator_token_id=35_998 , pad_token_id=35_995 , eos_token_id=35_999 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
| 684 |
def __lowerCamelCase ( s ) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"} )
    closed_brackets = set({")", "]", "}"} )
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
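# Illustrative examples (added): is_balanced("([]{})") is True, while
# is_balanced("([)]") is False because ")" does not close the popped "[".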
def __lowerCamelCase ( ) -> str:
_UpperCAmelCase = input("Enter sequence of brackets: " )
if is_balanced(_lowerCAmelCase ):
print(_lowerCAmelCase , "is balanced" )
else:
print(_lowerCAmelCase , "is not balanced" )
if __name__ == "__main__":
main()
| 684 | 1 |
from collections.abc import Callable
class __SCREAMING_SNAKE_CASE :
    def __init__( self : List[str] , key : Callable | None = None ):
        # Stores actual heap items.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : int ):
return int((i - 1) / 2 ) if i > 0 else None
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : int ):
_UpperCAmelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : int ):
_UpperCAmelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : int ):
_UpperCAmelCase , _UpperCAmelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCAmelCase , _UpperCAmelCase = self.arr[j], self.arr[i]
def UpperCAmelCase__ ( self : int , __UpperCamelCase : int , __UpperCamelCase : int ):
return self.arr[i][1] < self.arr[j][1]
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : int ):
_UpperCAmelCase = self._left(__UpperCamelCase )
_UpperCAmelCase = self._right(__UpperCamelCase )
_UpperCAmelCase = i
if left is not None and not self._cmp(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = left
if right is not None and not self._cmp(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = right
return valid_parent
def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : int ):
_UpperCAmelCase = self._parent(__UpperCamelCase )
while parent is not None and not self._cmp(__UpperCamelCase , __UpperCamelCase ):
self._swap(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = parent, self._parent(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : int ):
_UpperCAmelCase = self._get_valid_parent(__UpperCamelCase )
while valid_parent != index:
self._swap(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = valid_parent, self._get_valid_parent(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int ):
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
_UpperCAmelCase = [item, self.key(__UpperCamelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__UpperCamelCase )
self._heapify_down(__UpperCamelCase )
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : int ):
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCAmelCase = self.arr[self.size - 1]
_UpperCAmelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__UpperCamelCase )
self._heapify_down(__UpperCamelCase )
def UpperCAmelCase__ ( self : int , __UpperCamelCase : int , __UpperCamelCase : int ):
_UpperCAmelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__UpperCamelCase )] )
else:
_UpperCAmelCase = [item, self.key(__UpperCamelCase )]
_UpperCAmelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def UpperCAmelCase__ ( self : Dict ):
return self.arr[0] if self.size else None
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
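# Hedged usage sketch (added; assumes the original names Heap, insert_item and
# extract_top for the obfuscated class and methods above):
#
#   heap = Heap(key=lambda x: -x)   # negated key turns the min-heap into a max-heap
#   for item in (3, 1, 2):
#       heap.insert_item(item)
#   assert heap.extract_top()[0] == 3   # items are stored as [item, key] pairs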
def __lowerCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
def __lowerCamelCase ( equation_a , equation_b ) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation_a ) == len(equation_b ) == 3:
        raise ValueError("Please enter a valid equation." )
    if equation_a[0] == equation_a[1] == equation_b[0] == equation_b[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero." )
    # Extract the coefficients
    a1 , b1 , c1 = equation_a
    a2 , b2 , c2 = equation_b
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
            x = determinant_x / determinant
            y = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
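# Worked example (added): solving x + y = 3 and x - y = 1, encoded as (a, b, c)
# triples, gives determinant = -2, determinant_x = -4 and determinant_y = -2:
#
#   assert __lowerCamelCase((1, 1, 3), (1, -1, 1)) == (2.0, 1.0)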
| 684 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowerCAmelCase = pytest.mark.integration
@require_faiss
class __SCREAMING_SNAKE_CASE ( lowercase):
def UpperCAmelCase__ ( self : Optional[Any] ):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
def UpperCAmelCase__ ( self : Any ):
import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def UpperCAmelCase__ ( self : Dict ):
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def UpperCAmelCase__ ( self : int ):
import faiss
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__UpperCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def UpperCAmelCase__ ( self : Tuple ):
from elasticsearch import Elasticsearch
_UpperCAmelCase = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
_UpperCAmelCase = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
_UpperCAmelCase = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class __SCREAMING_SNAKE_CASE ( lowercase):
def UpperCAmelCase__ ( self : str ):
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(__UpperCamelCase )
self.assertRaises(__UpperCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_UpperCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(__UpperCamelCase )
self.assertRaises(__UpperCamelCase , index.search_batch , queries[0] )
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __UpperCamelCase )
def UpperCAmelCase__ ( self : Any ):
import faiss
_UpperCAmelCase = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_UpperCAmelCase = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def UpperCAmelCase__ ( self : Dict ):
import faiss
_UpperCAmelCase = faiss.IndexFlat(5 )
_UpperCAmelCase = FaissIndex(custom_index=__UpperCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCAmelCase__ ( self : Optional[int] ):
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCamelCase ) as tmp_file:
index.save(tmp_file.name )
_UpperCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(__UpperCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
import faiss
_UpperCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_UpperCAmelCase = "index.faiss"
_UpperCAmelCase = F'''mock://{index_name}'''
index.save(_lowerCAmelCase , storage_options=mockfs.storage_options )
_UpperCAmelCase = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options )
_UpperCAmelCase = np.zeros(5 , dtype=np.floataa )
_UpperCAmelCase = 1
_UpperCAmelCase , _UpperCAmelCase = index.search(_lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __SCREAMING_SNAKE_CASE ( lowercase):
def UpperCAmelCase__ ( self : Dict ):
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
_UpperCAmelCase = Elasticsearch()
_UpperCAmelCase = {"acknowledged": True}
_UpperCAmelCase = ElasticSearchIndex(es_client=__UpperCamelCase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
_UpperCAmelCase = "foo"
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search(__UpperCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_UpperCAmelCase = "foo"
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search(__UpperCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_UpperCAmelCase = ["foo", "bar", "foobar"]
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(__UpperCamelCase )
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCamelCase )
# batched queries with timeout
_UpperCAmelCase = ["foo", "bar", "foobar"]
_UpperCAmelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCAmelCase , _UpperCAmelCase = index.search_batch(__UpperCamelCase , request_timeout=30 )
_UpperCAmelCase = [scores[0] for scores in total_scores]
_UpperCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCamelCase )
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ) -> Optional[Any]:
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("Building PyTorch model from configuration: {}".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCamelCase ( number ) -> int:
    sum_of_digits_squared = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
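# Worked example (added): next_number(44) == 4**2 + 4**2 == 32, and the chain
# 44 -> 32 -> 13 -> 10 -> 1 terminates at 1, so 44 is not counted by solution().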
# Two chains are formed:
# one ends with 89, and declaring its member 58 first gives the least number
# of iterations needed to check all the members;
# the other ends with 1 and has only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def __lowerCamelCase ( number ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def __lowerCamelCase ( number = 10_000_000 ) -> int:
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 684 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
_UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 )
self.assertEqual(
__UpperCamelCase , [
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
[{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}],
] , )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
_UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
_UpperCAmelCase = "How many cats are there?"
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
_UpperCAmelCase = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
| 684 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
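# A minimal usage sketch, assuming network access to the Hub for the 3B checkpoint:
#
#     tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     tokenizer(" Hello world")["input_ids"]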
| 684 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none is passed in, so that
        # test_switch can actually exercise a caller-provided scheduler
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
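# A minimal sketch of the scheduler loop these tests exercise (the constant
# "residual" stands in for a denoising model's output and is an illustrative
# assumption):
#
#     scheduler = UniPCMultistepScheduler(solver_order=2, solver_type="bh2")
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         residual = torch.zeros_like(sample)
#         sample = scheduler.step(residual, t, sample).prev_sample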
| 684 | 1 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
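# For example, comment-only lines are dropped before hashing, so these two
# inputs produce the same digest:
#
#     _hash_python_lines(["x = 1", "# a comment", "print(x)"])
#     _hash_python_lines(["x = 1", "print(x)"])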
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowerCAmelCase = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 684 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
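    # With the edges above the expected output is 11 (node 1 to 4 via 1->3->4)
    # and 16 (node 0 to 3 via 0->2->3) -- worked out by hand as a sanity check.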
| 684 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
__lowerCAmelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
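# Example invocation, assuming this script is saved as
# convert_clap_original_pytorch_to_hf.py and the paths are placeholders:
#
#     python convert_clap_original_pytorch_to_hf.py \
#         --checkpoint_path /path/to/clap_ckpt.pt \
#         --pytorch_dump_folder_path ./clap-hf \
#         --enable_fusion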
| 684 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
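# A minimal sketch of the autoencoding path exercised above, with randomly
# initialized weights (shapes are illustrative):
#
#     model = VQModel(
#         block_out_channels=[32, 64], in_channels=3, out_channels=3,
#         down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
#         up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
#     )
#     x = torch.randn(1, 3, 32, 32)
#     recon = model(x).sample  # encode -> vector-quantize -> decode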
| 684 | 1 |