Dataset schema (one row per sample):

  column                    type     range / lengths
  ------------------------  -------  ----------------------
  code                      string   82 - 54.1k characters
  code_codestyle            int64    0 - 699
  style_context             string   111 - 35.6k characters
  style_context_codestyle   int64    0 - 699
  label                     int64    0 - 1
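Each row pairs a code sample with a style context plus integer style labels. A minimal sketch of loading and inspecting rows with this schema via the datasets library; the hub id "user/code-style-pairs" is a placeholder, not the real dataset name:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder hub id
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample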
def infix_2_postfix(infix: str) -> str:
    """Convert an infix expression to postfix, printing each step in a table."""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # priority of each operator
    print_width = max(len(infix), 7)

    # Print table header for output
    print("Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ")
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is a letter or digit, add it to postfix
        elif x == "(":
            stack.append(x)  # if x is "(", push to stack
        elif x == ")":  # if x is ")", pop the stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # pop stack & add the content to postfix
            stack.pop()  # discard the "("
        else:
            if len(stack) == 0:
                stack.append(x)  # if the stack is empty, push x
            else:
                # pop operators of greater or equal priority, then push x
                # (the "(" guard fixes a KeyError latent in the original)
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())
                stack.append(x)
        print(x.center(8), "".join(stack).ljust(print_width), "".join(post_fix).ljust(print_width), sep=" | ")

    while len(stack) > 0:  # while the stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to postfix
        print(" ".center(8), "".join(stack).ljust(print_width), "".join(post_fix).ljust(print_width), sep=" | ")

    return "".join(post_fix)  # return postfix as str


def infix_2_prefix(infix: str) -> str:
    """Convert an infix expression to prefix via the reverse-postfix trick."""
    reversed_infix = list(infix[::-1])  # reverse the infix expression
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    # postfix of the reversed expression, reversed again, is the prefix
    return infix_2_postfix("".join(reversed_infix))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # input an infix expression
    Infix = "".join(Infix.split())  # remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
code_codestyle: 486
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = ["image_processor", "tokenizer"] UpperCAmelCase_ = "AutoImageProcessor" UpperCAmelCase_ = "AutoTokenizer" def __init__( self : Tuple, _UpperCAmelCase : str=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", _UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : str = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE__ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : int = self.image_processor SCREAMING_SNAKE_CASE__ : Any = False def __call__( self : List[str], *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("images", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("text", _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : Optional[int] = args[0] SCREAMING_SNAKE_CASE__ : str = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase ) if text is not None: SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE__ : Optional[int] = encodings["input_ids"] return inputs def A_ ( self : Dict, *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase ) def A_ ( self : List[str], *_UpperCAmelCase : int, **_UpperCAmelCase : Dict ) -> Any: """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase ) @contextmanager def A_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." 
) SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer yield SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self : Tuple, _UpperCAmelCase : List[Any], _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[Any]=None ) -> Any: """simple docstring""" if added_vocab is None: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.get_added_vocab() SCREAMING_SNAKE_CASE__ : str = {} while tokens: SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE ) if start_token is None: break SCREAMING_SNAKE_CASE__ : Any = start_token.group(1 ) SCREAMING_SNAKE_CASE__ : Dict = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE ) SCREAMING_SNAKE_CASE__ : Any = start_token.group() if end_token is None: SCREAMING_SNAKE_CASE__ : List[str] = tokens.replace(_UpperCAmelCase, "" ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = end_token.group() SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = re.escape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE ) if content is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node SCREAMING_SNAKE_CASE__ : str = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase ) if value: if len(_UpperCAmelCase ) == 1: SCREAMING_SNAKE_CASE__ : str = value[0] SCREAMING_SNAKE_CASE__ : List[str] = value else: # leaf nodes SCREAMING_SNAKE_CASE__ : Optional[int] = [] for leaf in content.split(r"<sep/>" ): SCREAMING_SNAKE_CASE__ : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": SCREAMING_SNAKE_CASE__ : str = leaf[1:-2] # for categorical special tokens output[key].append(_UpperCAmelCase ) if len(output[key] ) == 1: SCREAMING_SNAKE_CASE__ : str = output[key][0] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase ) if len(_UpperCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def A_ ( self : str ) -> Optional[Any]: """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, ) return self.image_processor_class @property def A_ ( self : int ) -> List[str]: """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, ) return self.image_processor
style_context_codestyle: 663
label: 0
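A quick check of the two entry points in this row's code column (assuming the cleaned-up definitions above are in scope; expected strings worked out by hand):

print(infix_2_postfix("a+b*(c^d-e)"))  # prints the step table, returns "abcd^e-*+"
print(infix_2_prefix("a+b*(c^d-e)"))   # "+a*b-^cde"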
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class EmbedNormalizer(ModelMixin, ConfigMixin):
    # Descriptive names restored for the obfuscated class/method names in this row.
    # Holds the mean and std of image embeddings and (un)normalizes with them.

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        # Normalize: subtract the mean, divide by the std.
        return (embeds - self.mean) * 1.0 / self.std

    def unscale(self, embeds):
        # Invert scale(): multiply by the std, add the mean back.
        return (embeds * self.std) + self.mean
code_codestyle: 462
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 663
label: 0
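This row's code column is a small mean/std normalizer for image embeddings, and scale and unscale are exact inverses. A self-contained check of that arithmetic (tensor shapes are illustrative; no diffusers imports needed):

import torch

mean, std = torch.zeros(1, 768), torch.ones(1, 768)
embeds = torch.randn(4, 768)
scaled = (embeds - mean) / std
unscaled = scaled * std + mean
assert torch.allclose(unscaled, embeds, atol=1e-6)  # scale/unscale round-trips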
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self) -> None:
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
code_codestyle: 590
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : """simple docstring""" def __init__( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=3, _UpperCAmelCase : str=1_6, _UpperCAmelCase : Tuple=[1, 2, 1], _UpperCAmelCase : List[str]=[2, 2, 4], _UpperCAmelCase : Tuple=2, _UpperCAmelCase : str=2.0, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Any=False, _UpperCAmelCase : Any=True, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Any=1E-5, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : str=True, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : List[str]=8, _UpperCAmelCase : Union[str, Any]=["stage1", "stage2", "stage3"], _UpperCAmelCase : Any=[1, 2, 3], ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = image_size SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : Optional[int] = embed_dim SCREAMING_SNAKE_CASE__ : List[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = num_heads SCREAMING_SNAKE_CASE__ : str = window_size SCREAMING_SNAKE_CASE__ : Any = mlp_ratio SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Tuple = drop_path_rate SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = use_absolute_embeddings SCREAMING_SNAKE_CASE__ : Tuple = patch_norm SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = is_training SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_stride SCREAMING_SNAKE_CASE__ : List[Any] = out_features SCREAMING_SNAKE_CASE__ : Dict = out_indices def A_ ( self : List[str] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : str = None if self.use_labels: SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config() return config, pixel_values, labels def A_ ( self : Optional[int] ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size, patch_size=self.patch_size, 
num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def A_ ( self : Dict, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) ) def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskFormerSwinBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(_UpperCAmelCase ): SCREAMING_SNAKE_CASE__ : List[Any] = ["stem"] SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_UpperCAmelCase ) def A_ ( self : str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) UpperCAmelCase_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = MaskFormerSwinModelTester(self ) SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self, config_class=_UpperCAmelCase, embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def A_ ( self : Any ) -> List[Any]: """simple docstring""" pass def A_ ( self : Tuple ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self : int ) -> Optional[Any]: """simple docstring""" return def A_ ( self : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def A_ ( self : List[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) @unittest.skip("Swin does not use inputs_embeds" ) def A_ ( self : Any ) -> Optional[int]: """simple docstring""" pass @unittest.skip("Swin does not support feedforward chunking" ) def A_ ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def A_ ( self : Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) ) def A_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1], _UpperCAmelCase ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def A_ ( self : Dict ) -> List[str]: """simple docstring""" pass def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase ) # Swin has a different seq_length SCREAMING_SNAKE_CASE__ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], ) def A_ ( self : 
Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) def A_ ( self : Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[int] = 3 SCREAMING_SNAKE_CASE__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) SCREAMING_SNAKE_CASE__ : str = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Any = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def A_ ( self : List[Any] ) -> Dict: """simple docstring""" pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def A_ ( self : Dict ) -> str: """simple docstring""" pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def A_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" pass def A_ ( self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_UpperCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE__ : Dict = 0 return t def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ): with torch.no_grad(): SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple() def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ): if isinstance(_UpperCAmelCase, (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ): recursive_check(_UpperCAmelCase, _UpperCAmelCase ) elif isinstance(_UpperCAmelCase, _UpperCAmelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): 
recursive_check(_UpperCAmelCase, _UpperCAmelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=( "Tuple and dict output are not equal. Difference:" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has''' F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.''' ), ) recursive_check(_UpperCAmelCase, _UpperCAmelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} ) @require_torch class lowerCamelCase (unittest.TestCase , __lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else () UpperCAmelCase_ = MaskFormerSwinConfig def A_ ( self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModelTester(self ) def A_ ( self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Any = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str = backbone_class(_UpperCAmelCase ) backbone.to(_UpperCAmelCase ) backbone.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ): self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:], 
backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: SCREAMING_SNAKE_CASE__ : int = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase ) self.assertIsNotNone(outputs.attentions )
style_context_codestyle: 663
label: 0
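This row's style_context (the MaskFormerSwin tester) derives the expected final sequence length and hidden width from the config. A worked check of that arithmetic with the tester's defaults (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):

image_size, patch_size, embed_dim = 32, 2, 16
depths = [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))  # 16 * 4 = 64
print(expected_seq_len, expected_dim)  # 16 64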
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items by best profit/weight ratio."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for 1 kg of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # A sorted copy of the list, ascending, so the best ratio sits at the end.
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0  # total weight taken so far
    gain = 0.0  # total profit gained
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) or i == length
    while limit <= max_weight and i < length:
        # greatest remaining ratio in sorted_profit_by_weight; flag it as used with -1
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the whole item fits within the remaining capacity
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding the full profit, since weight[index]/weight[index] == 1
            gain += 1 * profit[index]
        else:
            # The item is heavier than the remaining capacity, so take only the
            # fraction that fits: (weight remaining) / weight[index].
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
code_codestyle: 344
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
style_context_codestyle: 663
label: 0
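A small usage check of calc_profit from this row's code column (assuming it is in scope): all three items share a 2.0 profit/weight ratio, and the 5 kg and 10 kg items exactly fill the 15 kg limit.

profit, weight = [10, 20, 30], [5, 10, 15]
print(calc_profit(profit, weight, max_weight=15))  # 30.0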
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) a : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name a : Any = ''' Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "A red cartoon frog, 4k" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/frog.png" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save("red_frog.png") ``` ''' def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : Optional[Any]=8 ) ->Tuple: '''simple docstring''' a : int = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 a : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : str=512 , _lowercase : Optional[Any]=512 ) ->Optional[Any]: '''simple docstring''' a : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) a : List[Any] = np.array(pil_image.convert("RGB" ) ) a : str = arr.astype(np.floataa ) / 127.5 - 1 a : Union[str, Any] = np.transpose(SCREAMING_SNAKE_CASE__ , [2, 0, 1] ) a : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) return image class __UpperCamelCase ( __lowerCamelCase ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , movq=_UpperCAmelCase , ) a : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: a : List[str] = min(int(num_inference_steps * strength ) , _UpperCAmelCase ) a : List[str] = max(num_inference_steps - init_timestep , 0 ) a : Union[str, Any] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> List[str]: if not isinstance(_UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCAmelCase )}""" ) a : Tuple = image.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) a : Optional[int] = batch_size * num_images_per_prompt if 
image.shape[1] == 4: a : List[str] = image else: if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): a : Dict = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase ) ] a : Tuple = torch.cat(_UpperCAmelCase , dim=0 ) else: a : str = self.movq.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase ) a : Tuple = self.movq.config.scaling_factor * init_latents a : Tuple = torch.cat([init_latents] , dim=0 ) a : List[Any] = init_latents.shape a : Union[str, Any] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase ) # get latents a : Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) a : List[Any] = init_latents return latents def __a ( self , lowerCAmelCase__=0 ) -> List[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) a : Any = torch.device(f"""cuda:{gpu_id}""" ) a : Tuple = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_UpperCAmelCase , _UpperCAmelCase ) def __a ( self , lowerCAmelCase__=0 ) -> Any: if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." ) a : Union[str, Any] = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=_UpperCAmelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) a : Any = None for cpu_offloaded_model in [self.unet, self.movq]: a : int = cpu_offload_with_hook(_UpperCAmelCase , _UpperCAmelCase , prev_module_hook=_UpperCAmelCase ) # We'll offload the last model manually. 
a : Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __a ( self ) -> Union[str, Any]: if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(_UpperCAmelCase , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_UpperCAmelCase ) def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 4.0 , lowerCAmelCase__ = 0.3 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Any: a : Optional[int] = self._execution_device a : Tuple = guidance_scale > 1.0 if isinstance(_UpperCAmelCase , _UpperCAmelCase ): a : Dict = torch.cat(_UpperCAmelCase , dim=0 ) a : Any = image_embeds.shape[0] if isinstance(_UpperCAmelCase , _UpperCAmelCase ): a : Tuple = torch.cat(_UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: a : str = image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 ) a : int = negative_image_embeds.repeat_interleave(_UpperCAmelCase , dim=0 ) a : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCAmelCase ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): a : List[str] = [image] if not all(isinstance(_UpperCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(_UpperCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" ) a : List[Any] = torch.cat([prepare_image(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for i in image] , dim=0 ) a : str = image.to(dtype=image_embeds.dtype , device=_UpperCAmelCase ) a : int = self.movq.encode(_UpperCAmelCase )["latents"] a : List[str] = latents.repeat_interleave(_UpperCAmelCase , dim=0 ) self.scheduler.set_timesteps(_UpperCAmelCase , device=_UpperCAmelCase ) a : List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) a : List[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) a : List[Any] = downscale_height_and_width(_UpperCAmelCase , _UpperCAmelCase , self.movq_scale_factor ) a : Optional[int] = self.prepare_latents( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , image_embeds.dtype , _UpperCAmelCase , _UpperCAmelCase ) for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ): # expand the latents if we are doing classifier free guidance a : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents a : Dict = {"image_embeds": image_embeds} a : str = self.unet( sample=_UpperCAmelCase , timestep=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , added_cond_kwargs=_UpperCAmelCase , return_dict=_UpperCAmelCase , )[0] if do_classifier_free_guidance: a : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) a : Union[str, Any] = noise_pred.chunk(2 ) a : List[Any] = variance_pred.chunk(2 ) a : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) a : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): a : 
List[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 a : Tuple = self.scheduler.step( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase , )[0] # post-processing a : Union[str, Any] = self.movq.decode(_UpperCAmelCase , force_not_quantize=_UpperCAmelCase )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: a : List[str] = image * 0.5 + 0.5 a : Optional[Any] = image.clamp(0 , 1 ) a : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": a : int = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCAmelCase )
code_codestyle: 633
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCamelCase : int = False @skip_mps class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline UpperCAmelCase_ = False UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} ) UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def A_ ( cls : str ) -> Union[str, Any]: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def A_ ( cls : Tuple ) -> str: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def A_ ( self : Any ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4), layers_per_block=1, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, attention_head_dim=(2, 4), use_linear_projection=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=_UpperCAmelCase, set_alpha_to_one=_UpperCAmelCase, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL( block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_2_8, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act="gelu", projection_dim=5_1_2, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextModel(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def A_ ( self : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any=0 ) -> Optional[Any]: """simple docstring""" if str(_UpperCAmelCase ).startswith("mps" ): SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_UpperCAmelCase ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = { "prompt": "a cat and a frog", "token_indices": [2, 5], "generator": generator, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", "max_iter_to_alter": 2, "thresholds": {0: 0.7}, } return inputs def A_ ( self : 
Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "cpu" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = pipe(**_UpperCAmelCase ).images SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 6_4, 6_4, 3) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) SCREAMING_SNAKE_CASE__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_UpperCAmelCase, 1E-3 ) def A_ ( self : str ) -> List[Any]: """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def A_ ( self : Any ) -> str: """simple docstring""" # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self : Optional[Any] ) -> str: """simple docstring""" self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 ) def A_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def A_ ( self : Any ) -> List[str]: """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class lowerCamelCase (unittest.TestCase ): """simple docstring""" @classmethod def A_ ( cls : Union[str, Any] ) -> Tuple: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def A_ ( cls : List[str] ) -> List[str]: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def A_ ( self : str ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(5_1 ) SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=_UpperCAmelCase, torch_dtype=torch.floataa ) pipe.to("cuda" ) SCREAMING_SNAKE_CASE__ : List[str] = "a painting of an elephant with glasses" SCREAMING_SNAKE_CASE__ : Optional[int] = [5, 7] SCREAMING_SNAKE_CASE__ : str = pipe( prompt=_UpperCAmelCase, token_indices=_UpperCAmelCase, guidance_scale=7.5, generator=_UpperCAmelCase, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0] SCREAMING_SNAKE_CASE__ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" ) assert np.abs((expected_image - image).max() ) < 5E-1
style_context_codestyle: 663
label: 0
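This row's code column rounds the requested height/width onto the movq latent grid with a small helper (obfuscated as _SCREAMING_SNAKE_CASE above). A standalone copy of that helper with a worked check:

def downscale_height_and_width(height, width, scale_factor=8):
    # Round each dimension up to a whole multiple of scale_factor**2,
    # then divide by scale_factor to get the latent grid size.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor

print(downscale_height_and_width(768, 768))  # (96, 96): latent grid for a 768 px image at scale factor 8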
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : int): A_ : Optional[Any] = tmp_path_factory.mktemp("""dset_infos_dir""") if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""") as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""") if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""") as f: f.write("""""") # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""") as f: f.write("""{\"default\": {\"dataset_size\": 42}}""") A_ : Union[str, Any] = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE__) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""")}) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ), ] , ) def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : DatasetInfo): A_ : str = str(SCREAMING_SNAKE_CASE__) dataset_info.write_to_directory(SCREAMING_SNAKE_CASE__) A_ : int = DatasetInfo.from_directory(SCREAMING_SNAKE_CASE__) assert dataset_info == reloaded assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , """dataset_info.json""")) def lowerCamelCase ( ): A_ : Union[str, Any] = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""")}) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) A_ : str = dataset_info._to_yaml_dict() assert sorted(SCREAMING_SNAKE_CASE__) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str)) A_ : Tuple = yaml.safe_dump(SCREAMING_SNAKE_CASE__) A_ : Dict = yaml.safe_load(SCREAMING_SNAKE_CASE__) assert dataset_info_yaml_dict == reloaded def lowerCamelCase ( ): A_ : int = DatasetInfo() A_ : Union[str, Any] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()}), DatasetInfosDict({"""my_config_name""": DatasetInfo()}), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""")}) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ) }), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42), """v2""": DatasetInfo(dataset_size=1337), }), ] , ) def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : DatasetInfosDict): A_ : 
Union[str, Any] = str(SCREAMING_SNAKE_CASE__) dataset_infos_dict.write_to_directory(SCREAMING_SNAKE_CASE__) A_ : Optional[int] = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE__) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): A_ : Optional[int] = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml A_ : Tuple = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict()) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , """README.md"""))
code_codestyle: 665
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class lowerCamelCase:
    """simple docstring"""

    UpperCAmelCase_ = PegasusConfig
    UpperCAmelCase_ = {}
    UpperCAmelCase_ = "gelu"

    def __init__(
        self : Union[str, Any],
        _UpperCAmelCase : Optional[Any],
        _UpperCAmelCase : Tuple=1_3,
        _UpperCAmelCase : int=7,
        _UpperCAmelCase : Optional[int]=True,
        _UpperCAmelCase : int=False,
        _UpperCAmelCase : Union[str, Any]=9_9,
        _UpperCAmelCase : Optional[Any]=3_2,
        _UpperCAmelCase : Optional[Any]=2,
        _UpperCAmelCase : Tuple=4,
        _UpperCAmelCase : str=3_7,
        _UpperCAmelCase : List[str]=0.1,
        _UpperCAmelCase : List[str]=0.1,
        _UpperCAmelCase : Dict=4_0,
        _UpperCAmelCase : Any=2,
        _UpperCAmelCase : int=1,
        _UpperCAmelCase : str=0,
    ) -> List[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[str] = parent
        SCREAMING_SNAKE_CASE__ : Tuple = batch_size
        SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
        SCREAMING_SNAKE_CASE__ : int = is_training
        SCREAMING_SNAKE_CASE__ : int = use_labels
        SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
        SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
        SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
        SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
        SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE__ : str = eos_token_id
        SCREAMING_SNAKE_CASE__ : Dict = pad_token_id
        SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id

    def A_ ( self : int ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1 )
        SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        SCREAMING_SNAKE_CASE__ : List[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
        return config, inputs_dict

    def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Optional[Any] = TFPegasusModel(config=_UpperCAmelCase ).get_decoder()
        SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["input_ids"]

        SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :]
        SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["attention_mask"][:1, :]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["head_mask"]
        SCREAMING_SNAKE_CASE__ : int = 1

        # first forward pass
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase )

        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3), config.vocab_size )
        SCREAMING_SNAKE_CASE__ : str = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.int8 )

        # append to next input_ids and attention_mask
        SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1 )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 )

        SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0]
        SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )

        # select random slice
        SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) )
        SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
        SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 )


def _a (
    SCREAMING_SNAKE_CASE__ : Optional[Any],
    SCREAMING_SNAKE_CASE__ : List[Any],
    SCREAMING_SNAKE_CASE__ : List[str],
    SCREAMING_SNAKE_CASE__ : int=None,
    SCREAMING_SNAKE_CASE__ : Tuple=None,
    SCREAMING_SNAKE_CASE__ : Optional[Any]=None,
    SCREAMING_SNAKE_CASE__ : Any=None,
    SCREAMING_SNAKE_CASE__ : List[str]=None,
) -> Any:
    '''simple docstring'''
    if attention_mask is None:
        SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__, config.pad_token_id ), tf.int8 )
    if decoder_attention_mask is None:
        SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.int8 ),
            ],
            axis=-1,
        )
    if head_mask is None:
        SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class lowerCamelCase (__lowerCamelCase, __lowerCamelCase, unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase_ = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase_ = True
    UpperCAmelCase_ = False
    UpperCAmelCase_ = False

    def A_ ( self : List[str] ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Optional[int] = TFPegasusModelTester(self )
        SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=_UpperCAmelCase )

    def A_ ( self : Optional[Any] ) -> int:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def A_ ( self : str ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )


@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase (unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase_ = [
        " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
        " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
    ]
    UpperCAmelCase_ = [
        "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
        " reduce the risk of wildfires.",
        "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    UpperCAmelCase_ = "google/pegasus-xsum"

    @cached_property
    def A_ ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def A_ ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model

    def A_ ( self : str, **_UpperCAmelCase : Optional[int] ) -> Dict:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Tuple = self.translate_src_text(**_UpperCAmelCase )
        assert self.expected_text == generated_words

    def A_ ( self : Any, **_UpperCAmelCase : Optional[Any] ) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, **_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="tf" )
        SCREAMING_SNAKE_CASE__ : List[str] = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=_UpperCAmelCase,
        )
        SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )
        return generated_words

    @slow
    def A_ ( self : List[Any] ) -> Any:
        """simple docstring"""
        self._assert_generated_batch_equal_expected()
663
0
import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)


class __a (__lowerCamelCase ):
    __snake_case : List[str] = ["""input_ids""", """attention_mask"""]

    def __init__(
        self : str,
        UpperCAmelCase : Tuple="</s>",
        UpperCAmelCase : Union[str, Any]="<unk>",
        UpperCAmelCase : Optional[Any]="<pad>",
        UpperCAmelCase : Optional[int]=1_25,
        UpperCAmelCase : str=None,
        **UpperCAmelCase : Optional[int],
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            lowerCAmelCase_ : Optional[int] = [F'<extra_id_{i}>' for i in range(_UpperCAmelCase )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            lowerCAmelCase_ : Optional[Any] = len(set(filter(lambda UpperCAmelCase : bool("""extra_id""" in str(_UpperCAmelCase ) ), _UpperCAmelCase ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
                    """ extra_ids tokens"""
                )

        lowerCAmelCase_ : int = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else pad_token
        lowerCAmelCase_ : Optional[Any] = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else eos_token
        lowerCAmelCase_ : int = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else unk_token

        super().__init__(
            eos_token=_UpperCAmelCase,
            unk_token=_UpperCAmelCase,
            pad_token=_UpperCAmelCase,
            extra_ids=_UpperCAmelCase,
            additional_special_tokens=_UpperCAmelCase,
            **_UpperCAmelCase,
        )

        lowerCAmelCase_ : List[Any] = extra_ids
        lowerCAmelCase_ : Optional[int] = 2**8  # utf is 8 bits

        # define special tokens dict
        lowerCAmelCase_ : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        lowerCAmelCase_ : str = len(self.special_tokens_encoder )
        lowerCAmelCase_ : Tuple = len(_UpperCAmelCase )
        for i, token in enumerate(_UpperCAmelCase ):
            lowerCAmelCase_ : List[Any] = self.vocab_size + i - n
        lowerCAmelCase_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def A ( self : List[Any] ):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def A ( self : str, UpperCAmelCase : List[int], UpperCAmelCase : Optional[List[int]] = None, UpperCAmelCase : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase
            )

        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(_UpperCAmelCase )) + [1]
        return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]

    def A ( self : Optional[Any], UpperCAmelCase : List[int] ):
        if len(_UpperCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                """ eos tokens being added."""
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def A ( self : Tuple, UpperCAmelCase : List[int], UpperCAmelCase : Optional[List[int]] = None ):
        lowerCAmelCase_ : Union[str, Any] = [self.eos_token_id]

        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def A ( self : Any, UpperCAmelCase : List[int], UpperCAmelCase : Optional[List[int]] = None ):
        lowerCAmelCase_ : Optional[int] = self._add_eos_if_not_present(_UpperCAmelCase )
        if token_ids_a is None:
            return token_ids_a
        else:
            lowerCAmelCase_ : List[str] = self._add_eos_if_not_present(_UpperCAmelCase )
            return token_ids_a + token_ids_a

    def A ( self : Optional[Any], UpperCAmelCase : str ):
        lowerCAmelCase_ : Dict = [chr(i ) for i in text.encode("""utf-8""" )]
        return tokens

    def A ( self : Union[str, Any], UpperCAmelCase : Dict ):
        if token in self.special_tokens_encoder:
            lowerCAmelCase_ : Dict = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            lowerCAmelCase_ : int = self.added_tokens_encoder[token]
        elif len(_UpperCAmelCase ) != 1:
            lowerCAmelCase_ : int = self.unk_token_id
        else:
            lowerCAmelCase_ : Tuple = ord(_UpperCAmelCase ) + self._num_special_tokens
        return token_id

    def A ( self : int, UpperCAmelCase : Any ):
        if index in self.special_tokens_decoder:
            lowerCAmelCase_ : Dict = self.special_tokens_decoder[index]
        else:
            lowerCAmelCase_ : List[Any] = chr(index - self._num_special_tokens )
        return token

    def A ( self : Dict, UpperCAmelCase : Tuple ):
        lowerCAmelCase_ : List[str] = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                lowerCAmelCase_ : str = self.special_tokens_decoder[token].encode("""utf-8""" )
            elif token in self.added_tokens_decoder:
                lowerCAmelCase_ : int = self.special_tokens_decoder[token].encode("""utf-8""" )
            elif token in self.special_tokens_encoder:
                lowerCAmelCase_ : Optional[Any] = token.encode("""utf-8""" )
            elif token in self.added_tokens_encoder:
                lowerCAmelCase_ : List[str] = token.encode("""utf-8""" )
            else:
                lowerCAmelCase_ : List[Any] = bytes([ord(_UpperCAmelCase )] )
            bstring += tok_string
        lowerCAmelCase_ : Dict = bstring.decode("""utf-8""", errors="""ignore""" )
        return string

    def A ( self : Optional[int], UpperCAmelCase : str, UpperCAmelCase : Optional[str] = None ):
        return ()
600
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_lowerCamelCase : List[str] = {
    '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
    '''tokenization_tapas''': ['''TapasTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TapasForMaskedLM''',
        '''TapasForQuestionAnswering''',
        '''TapasForSequenceClassification''',
        '''TapasModel''',
        '''TapasPreTrainedModel''',
        '''load_tf_weights_in_tapas''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Dict = [
        '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFTapasForMaskedLM''',
        '''TFTapasForQuestionAnswering''',
        '''TFTapasForSequenceClassification''',
        '''TFTapasModel''',
        '''TFTapasPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    _lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
663
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_lowerCamelCase : List[str] = {
    '''configuration_altclip''': [
        '''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''AltCLIPConfig''',
        '''AltCLIPTextConfig''',
        '''AltCLIPVisionConfig''',
    ],
    '''processing_altclip''': ['''AltCLIPProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : int = [
        '''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''AltCLIPPreTrainedModel''',
        '''AltCLIPModel''',
        '''AltCLIPTextModel''',
        '''AltCLIPVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    _lowerCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
121
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
    raise Exception('''requires fairseq >= 1.0.0a''')

logging.set_verbosity_info()

_lowerCamelCase : Any = logging.get_logger(__name__)

_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''


def _a ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : bool ) -> Dict:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
    roberta.eval()  # disable dropout
    SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.sentence_encoder
    SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=5_14,
        type_vocab_size=1,
        layer_norm_eps=1E-5,
    )
    if classification_head:
        SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", SCREAMING_SNAKE_CASE__ )

    SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE__ )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.embed_tokens.weight
    SCREAMING_SNAKE_CASE__ : str = roberta_sent_encoder.embed_positions.weight
    SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    SCREAMING_SNAKE_CASE__ : Any = roberta_sent_encoder.layer_norm.weight
    SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
        SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
        SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn_layer_norm.weight
        SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn_layer_norm.bias

        # self attention
        SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )

        SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.self_attn.q_proj.weight
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.q_proj.bias
        SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight
        SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
        SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.weight
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight
        SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.weight
        SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq stores the two feed-forward projections as fc1/fc2)
        SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fc1.weight
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fc1.bias

        # output
        SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fc2.weight
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        SCREAMING_SNAKE_CASE__ : str = roberta.model.classification_heads["mnli"].dense.weight
        SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"].dense.bias
        SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.dense.weight
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
        SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.encoder.lm_head.weight
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )  # batch of size 1

    SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ )[0]
    if classification_head:
        SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"](roberta.extract_features(SCREAMING_SNAKE_CASE__ ) )
    else:
        SCREAMING_SNAKE_CASE__ : int = roberta.model(SCREAMING_SNAKE_CASE__ )[0]
    print(our_output.shape, their_output.shape )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    SCREAMING_SNAKE_CASE__ : int = torch.allclose(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, atol=1E-3 )
    print("Do both models output the same tensors?", "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )

    pathlib.Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__, exist_ok=SCREAMING_SNAKE_CASE__ )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    _lowerCamelCase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    _lowerCamelCase : Any = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
663
0
'''simple docstring'''
from PIL import Image


def snake_case_ ( _lowerCAmelCase : Image, _lowerCAmelCase : int ) -> Image:
    UpperCAmelCase : List[str] = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(_lowerCAmelCase : int ) -> int:
        return int(128 + factor * (c - 128) )

    # apply the per-pixel contrast function to the image
    return img.point(contrast )


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        UpperCamelCase__: Dict = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
127
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = "Wav2Vec2FeatureExtractor" UpperCAmelCase_ = "AutoTokenizer" def __init__( self : Tuple, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> List[str]: """simple docstring""" super().__init__(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor SCREAMING_SNAKE_CASE__ : Union[str, Any] = False @classmethod def A_ ( cls : int, _UpperCAmelCase : Dict, **_UpperCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" try: return super().from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) except OSError: warnings.warn( F'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: ", _UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) return cls(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase ) def __call__( self : Optional[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : str ) -> Optional[Any]: """simple docstring""" # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("raw_speech" ) else: SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("audio", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("sampling_rate", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("text", _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0] SCREAMING_SNAKE_CASE__ : Tuple = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if audio is not None: SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_UpperCAmelCase, *_UpperCAmelCase, sampling_rate=_UpperCAmelCase, **_UpperCAmelCase ) if text is not None: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: SCREAMING_SNAKE_CASE__ : List[str] = encodings["input_ids"] return inputs def A_ ( self : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("input_features", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("labels", _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0] SCREAMING_SNAKE_CASE__ : Dict = args[1:] if input_features is not None: SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor.pad(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase ) if labels is not None: SCREAMING_SNAKE_CASE__ : str = self.tokenizer.pad(_UpperCAmelCase, **_UpperCAmelCase ) if labels is None: return input_features elif input_features is None: return labels else: SCREAMING_SNAKE_CASE__ : List[str] = labels["input_ids"] return input_features def A_ ( self : Union[str, Any], *_UpperCAmelCase : str, **_UpperCAmelCase : Any ) -> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase ) def A_ ( self : Optional[int], *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase ) @contextmanager def A_ ( self : Optional[int] ) -> Any: """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : int = self.tokenizer yield SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor SCREAMING_SNAKE_CASE__ : Optional[Any] = False
663
0
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def _lowercase ( ): import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join _a = "__test_patch_submodule_mock__" with patch_submodule(_test_patching, "os.path.join", SCREAMING_SNAKE_CASE__ ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os, _PatchedModuleObj ) assert isinstance(_test_patching.os.path, _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path, _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os, _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path, _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def _lowercase ( ): assert _test_patching.open is open _a = "__test_patch_submodule_builtin_mock__" # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching, "open", SCREAMING_SNAKE_CASE__ ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def _lowercase ( ): _a = "__test_patch_submodule_missing_mock__" with patch_submodule(_test_patching, "pandas.read_csv", SCREAMING_SNAKE_CASE__ ): pass def _lowercase ( ): _a = "__test_patch_submodule_missing_builtin_mock__" # _test_patching doesn't have "len" in its globals assert getattr(_test_patching, "len", SCREAMING_SNAKE_CASE__ ) is None with patch_submodule(_test_patching, "len", SCREAMING_SNAKE_CASE__ ): assert _test_patching.len is mock assert _test_patching.len is len def _lowercase ( ): _a = "__test_patch_submodule_start_and_stop_mock__" _a = patch_submodule(_test_patching, "open", SCREAMING_SNAKE_CASE__ ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert 
_test_patching.open is open def _lowercase ( ): from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join _a = "__test_patch_submodule_successive_join__" _a = "__test_patch_submodule_successive_dirname__" _a = "__test_patch_submodule_successive_rename__" assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching, "os.path.join", SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching, "os.rename", SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching, "os.path.dirname", SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching, "os.rename", SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching, "os.path.join", SCREAMING_SNAKE_CASE__ ): with patch_submodule(_test_patching, "os.path.dirname", SCREAMING_SNAKE_CASE__ ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def _lowercase ( ): _a = "__test_patch_submodule_doesnt_exist_mock__" with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", SCREAMING_SNAKE_CASE__ ): pass with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", SCREAMING_SNAKE_CASE__ ): pass
131
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_lowerCamelCase : Tuple = {
    '''configuration_xlm_roberta_xl''': [
        '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaXLConfig''',
        '''XLMRobertaXLOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaXLForCausalLM''',
        '''XLMRobertaXLForMaskedLM''',
        '''XLMRobertaXLForMultipleChoice''',
        '''XLMRobertaXLForQuestionAnswering''',
        '''XLMRobertaXLForSequenceClassification''',
        '''XLMRobertaXLForTokenClassification''',
        '''XLMRobertaXLModel''',
        '''XLMRobertaXLPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )

else:
    import sys

    _lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
663
0
'''simple docstring'''
from __future__ import annotations

__SCREAMING_SNAKE_CASE = 1_0


def __a ( lowerCAmelCase__ : list[int] ):
    a__ : Optional[Any] = 1
    a__ : Union[str, Any] = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        a__ : list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            a__ : List[str] = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a__ : Tuple = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                a__ : Optional[Any] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
688
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = '''▁''' _lowerCamelCase : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} _lowerCamelCase : Dict = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } _lowerCamelCase : Optional[Any] = {'''vinai/bartpho-syllable''': 1_0_2_4} class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = VOCAB_FILES_NAMES UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ = ["input_ids", "attention_mask"] def __init__( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any="<s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Dict="<unk>", _UpperCAmelCase : Tuple="<pad>", _UpperCAmelCase : int="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : Any, ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ : Any = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token SCREAMING_SNAKE_CASE__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file SCREAMING_SNAKE_CASE__ : Optional[int] = monolingual_vocab_file SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Optional[int] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids: SCREAMING_SNAKE_CASE__ : Dict = cnt cnt += 1 with open(_UpperCAmelCase, "r", encoding="utf-8" ) as f: for line in f.readlines(): SCREAMING_SNAKE_CASE__ : int = line.strip().split()[0] SCREAMING_SNAKE_CASE__ : Tuple = len(self.fairseq_tokens_to_ids ) if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids: SCREAMING_SNAKE_CASE__ : List[Any] = len(self.fairseq_tokens_to_ids ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy() SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto() return state def __setstate__( self : int, _UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = d # for backward compatibility if not hasattr(self, 
"sp_model_kwargs" ): SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id] SCREAMING_SNAKE_CASE__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A_ ( self : List[str], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A_ ( self : Any ) -> List[str]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def A_ ( self : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A_ ( self : Tuple, _UpperCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase ) def A_ ( self : List[str], _UpperCAmelCase : Union[str, Any] ) -> Tuple: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def A_ ( self : List[str], _UpperCAmelCase : str ) -> str: """simple docstring""" return self.fairseq_ids_to_tokens[index] def A_ ( self : Optional[Any], _UpperCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip() return out_string def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE__ : List[str] = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase, "wb" ) as fi: SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( 
_UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file, _UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_UpperCAmelCase, "w", encoding="utf-8" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F'''{str(_UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
663
0
def lowerCAmelCase_ ( __A, __A ) -> float:
    '''simple docstring'''

    def get_matched_characters(_str1, _str2 ) -> str:
        UpperCAmelCase__ = []
        UpperCAmelCase__ = min(len(_str1 ), len(_str2 ) ) // 2
        for i, l in enumerate(_str1 ):
            UpperCAmelCase__ = int(max(0, i - limit ) )
            UpperCAmelCase__ = int(min(i + limit + 1, len(_str2 ) ) )
            if l in _str2[left:right]:
                matched.append(l )
                # consume the matched character so it cannot be matched twice
                UpperCAmelCase__ = f"""{_str2[0:_str2.index(l )]} {_str2[_str2.index(l ) + 1:]}"""
        return "".join(matched )

    # matching characters
    UpperCAmelCase__ = get_matched_characters(str1, str2 )
    UpperCAmelCase__ = get_matched_characters(str2, str1 )
    UpperCAmelCase__ = len(str1_matched )

    # transposition
    UpperCAmelCase__ = (
        len([(c1, c2) for c1, c2 in zip(str1_matched, str2_matched ) if c1 != c2] ) // 2
    )

    if not match_count:
        UpperCAmelCase__ = 0.0
    else:
        UpperCAmelCase__ = (
            1
            / 3
            * (
                match_count / len(str1 )
                + match_count / len(str2 )
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    UpperCAmelCase__ = 0
    for c1, c2 in zip(str1[:4], str2[:4] ):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('hello', 'world'))
486
from random import shuffle import tensorflow as tf from numpy import array def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = int(SCREAMING_SNAKE_CASE__ ) assert noofclusters < len(SCREAMING_SNAKE_CASE__ ) # Find out the dimensionality SCREAMING_SNAKE_CASE__ : List[Any] = len(vectors[0] ) # Will help select random centroids from among the available vectors SCREAMING_SNAKE_CASE__ : List[Any] = list(range(len(SCREAMING_SNAKE_CASE__ ) ) ) shuffle(SCREAMING_SNAKE_CASE__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. SCREAMING_SNAKE_CASE__ : Tuple = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION SCREAMING_SNAKE_CASE__ : List[Any] = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points SCREAMING_SNAKE_CASE__ : Any = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(SCREAMING_SNAKE_CASE__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float64" , [dim] ) SCREAMING_SNAKE_CASE__ : Dict = [] for centroid in centroids: cent_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) SCREAMING_SNAKE_CASE__ : Tuple = [tf.Variable(0 ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value SCREAMING_SNAKE_CASE__ : Tuple = tf.placeholder("int32" ) SCREAMING_SNAKE_CASE__ : Tuple = [] for assignment in assignments: cluster_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input SCREAMING_SNAKE_CASE__ : int = tf.placeholder("float" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors SCREAMING_SNAKE_CASE__ : str = tf.reduce_mean(SCREAMING_SNAKE_CASE__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.placeholder("float" , [dim] ) SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float" , [dim] ) SCREAMING_SNAKE_CASE__ : Dict = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.placeholder("float" , [noofclusters] ) SCREAMING_SNAKE_CASE__ : Tuple = tf.argmin(SCREAMING_SNAKE_CASE__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. SCREAMING_SNAKE_CASE__ : Tuple = tf.initialize_all_variables() # Initialize all variables sess.run(SCREAMING_SNAKE_CASE__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. 
To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. SCREAMING_SNAKE_CASE__ : Tuple = 1_00 for _ in range(SCREAMING_SNAKE_CASE__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : Any = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. SCREAMING_SNAKE_CASE__ : Tuple = [ sess.run(SCREAMING_SNAKE_CASE__ , feed_dict={va: vect, va: sess.run(SCREAMING_SNAKE_CASE__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input SCREAMING_SNAKE_CASE__ : Any = sess.run( SCREAMING_SNAKE_CASE__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(SCREAMING_SNAKE_CASE__ ): # Collect all the vectors assigned to this cluster SCREAMING_SNAKE_CASE__ : Dict = [ vectors[i] for i in range(len(SCREAMING_SNAKE_CASE__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location SCREAMING_SNAKE_CASE__ : str = sess.run( SCREAMING_SNAKE_CASE__ , feed_dict={mean_input: array(SCREAMING_SNAKE_CASE__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments SCREAMING_SNAKE_CASE__ : int = sess.run(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = sess.run(SCREAMING_SNAKE_CASE__ ) return centroids, assignments
663
0
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    lowerCAmelCase = argparse.ArgumentParser(
        description=(
            """Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
            """ Distillation"""
        )
    )
    parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
    parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
    parser.add_argument(
        """--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str
    )
    parser.add_argument("""--vocab_transform""", action="""store_true""")
    lowerCAmelCase = parser.parse_args()

    if args.model_type == "bert":
        lowerCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
        lowerCAmelCase = '''bert'''
    else:
        raise ValueError("""args.model_type should be \"bert\".""")

    lowerCAmelCase = model.state_dict()
    lowerCAmelCase = {}

    for w in ["word_embeddings", "position_embeddings"]:
        lowerCAmelCase = state_dict[F'{prefix}.embeddings.{w}.weight']
    for w in ["weight", "bias"]:
        lowerCAmelCase = state_dict[F'{prefix}.embeddings.LayerNorm.{w}']

    lowerCAmelCase = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
            ]
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
            ]
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
            ]
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
            ]
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
            ]
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
            ]
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
            ]
            lowerCAmelCase = state_dict[
                F'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
            ]
        std_idx += 1

    lowerCAmelCase = state_dict['''cls.predictions.decoder.weight''']
    lowerCAmelCase = state_dict['''cls.predictions.bias''']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            lowerCAmelCase = state_dict[F'cls.predictions.transform.dense.{w}']
            lowerCAmelCase = state_dict[F'cls.predictions.transform.LayerNorm.{w}']

    print(F'N layers selected for distillation: {std_idx}')
    print(F'Number of params transferred for distillation: {len(compressed_sd.keys())}')

    print(F'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
462
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion''' ) _lowerCamelCase : List[str] = None _lowerCamelCase : Union[str, Any] = { '''7B''': 1_1_0_0_8, '''13B''': 1_3_8_2_4, '''30B''': 1_7_9_2_0, '''65B''': 2_2_0_1_6, '''70B''': 2_8_6_7_2, } _lowerCamelCase : Optional[Any] = { '''7B''': 1, '''7Bf''': 1, '''13B''': 2, '''13Bf''': 2, '''30B''': 4, '''65B''': 8, '''70B''': 8, '''70Bf''': 8, } def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : str=2_56 ) -> int: '''simple docstring''' return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: return json.load(SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str=True ) -> int: '''simple docstring''' os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , "tmp" ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = read_json(os.path.join(SCREAMING_SNAKE_CASE__ , "params.json" ) ) SCREAMING_SNAKE_CASE__ : int = NUM_SHARDS[model_size] SCREAMING_SNAKE_CASE__ : Union[str, Any] = params["n_layers"] SCREAMING_SNAKE_CASE__ : List[str] = params["n_heads"] SCREAMING_SNAKE_CASE__ : Optional[Any] = n_heads // num_shards SCREAMING_SNAKE_CASE__ : str = params["dim"] SCREAMING_SNAKE_CASE__ : List[str] = dim // n_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_0_0_0_0.0 SCREAMING_SNAKE_CASE__ : Tuple = 1.0 / (base ** (torch.arange(0 , SCREAMING_SNAKE_CASE__ , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: SCREAMING_SNAKE_CASE__ : int = params["n_kv_heads"] # for GQA / MQA SCREAMING_SNAKE_CASE__ : Optional[int] = n_heads_per_shard // num_key_value_heads SCREAMING_SNAKE_CASE__ : int = dim // num_key_value_heads else: # compatibility with other checkpoints SCREAMING_SNAKE_CASE__ : Dict = n_heads SCREAMING_SNAKE_CASE__ : str = n_heads_per_shard SCREAMING_SNAKE_CASE__ : Dict = dim # permute for sliced rotary def permute(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=n_heads , SCREAMING_SNAKE_CASE__ : List[str]=dim , SCREAMING_SNAKE_CASE__ : Dict=dim ): return w.view(SCREAMING_SNAKE_CASE__ , dima // n_heads // 2 , 2 , SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
SCREAMING_SNAKE_CASE__ : Dict = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , "consolidated.00.pth" ) , map_location="cpu" ) else: # Sharded SCREAMING_SNAKE_CASE__ : List[Any] = [ torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , f'''consolidated.{i:02d}.pth''' ) , map_location="cpu" ) for i in range(SCREAMING_SNAKE_CASE__ ) ] SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : List[str] = {"weight_map": {}} for layer_i in range(SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded SCREAMING_SNAKE_CASE__ : List[Any] = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
SCREAMING_SNAKE_CASE__ : Any = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } SCREAMING_SNAKE_CASE__ : int = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : Tuple = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : List[str] = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ) SCREAMING_SNAKE_CASE__ : List[str] = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ) SCREAMING_SNAKE_CASE__ : Tuple = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ) SCREAMING_SNAKE_CASE__ : int = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ) SCREAMING_SNAKE_CASE__ : List[str] = inv_freq for k, v in state_dict.items(): SCREAMING_SNAKE_CASE__ : str = filename param_count += v.numel() torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded SCREAMING_SNAKE_CASE__ : List[str] = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } else: SCREAMING_SNAKE_CASE__ : Optional[Any] = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ), } for k, v in state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[int] = filename param_count += v.numel() torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # Write configs SCREAMING_SNAKE_CASE__ : Optional[Any] = {"total_size": param_count * 2} write_json(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , "pytorch_model.bin.index.json" ) ) SCREAMING_SNAKE_CASE__ : List[str] = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 SCREAMING_SNAKE_CASE__ : Dict = params["multiple_of"] if "multiple_of" in params else 2_56 SCREAMING_SNAKE_CASE__ : Dict = LlamaConfig( hidden_size=SCREAMING_SNAKE_CASE__ , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=SCREAMING_SNAKE_CASE__ , ) config.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model." ) SCREAMING_SNAKE_CASE__ : int = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa , low_cpu_mem_usage=SCREAMING_SNAKE_CASE__ ) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format." ) model.save_pretrained(SCREAMING_SNAKE_CASE__ , safe_serialization=SCREAMING_SNAKE_CASE__ ) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_class(SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) def _a ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() parser.add_argument( "--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , ) parser.add_argument( "--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , ) parser.add_argument( "--output_dir" , help="Location to write HF model and tokenizer" , ) parser.add_argument("--safe_serialization" , type=SCREAMING_SNAKE_CASE__ , help="Whether or not to save using `safetensors`." ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(args.input_dir , "tokenizer.model" ) write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
663
0
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Fallback stub so the module still imports when PIL is unavailable."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
590
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase (__lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = GPTaTokenizer UpperCAmelCase_ = GPTaTokenizerFast UpperCAmelCase_ = True UpperCAmelCase_ = {"add_prefix_space": True} UpperCAmelCase_ = False def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : Any = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] SCREAMING_SNAKE_CASE__ : int = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) SCREAMING_SNAKE_CASE__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] SCREAMING_SNAKE_CASE__ : Any = {"unk_token": "<unk>"} SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) with open(self.merges_file, "w", encoding="utf-8" ) as fp: fp.write("\n".join(_UpperCAmelCase ) ) def A_ ( self : Tuple, **_UpperCAmelCase : str ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def A_ ( self : int, **_UpperCAmelCase : Union[str, Any] ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def A_ ( self : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "lower newer" SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer" return input_text, output_text def A_ ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : Tuple = "lower newer" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase ) def A_ ( self : Dict ) -> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = "lower newer" # Testing tokenization SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing conversion to ids without special tokens 
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : Dict = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase ) def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def A_ ( self : Optional[Any], _UpperCAmelCase : int=1_5 ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) # Simple input SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : List[str] = ["This is a simple input 1", "This is a simple input 2"] SCREAMING_SNAKE_CASE__ : Any = ("This is a simple input", "This is a pair") SCREAMING_SNAKE_CASE__ : List[Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Simple input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Simple input self.assertRaises( _UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", ) # Pair input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Pair input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Pair input self.assertRaises( _UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", ) def A_ ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" ) # Simple input SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : Dict = ["This is a simple input looooooooong", "This is a simple input"] SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair") SCREAMING_SNAKE_CASE__ : int = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : Tuple = 
tokenizer(_UpperCAmelCase, padding="max_length", max_length=3_0, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(*_UpperCAmelCase, padding="max_length", max_length=6_0, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1], 3_0 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1], 6_0 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def A_ ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$" SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0], _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0], _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def A_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass def A_ ( self : Dict ) -> str: """simple docstring""" # TODO: change to self.get_tokenizers() when the fast version is implemented SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this." SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please." 
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus( _UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"] SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase ) ] SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(_UpperCAmelCase, _UpperCAmelCase ) @require_tokenizers class lowerCamelCase (unittest.TestCase ): """simple docstring""" def A_ ( self : Optional[Any] ) -> int: """simple docstring""" # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat" SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("test_opt" ) SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat" SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode( _UpperCAmelCase, ) # Same as above self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def A_ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = "bos" SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"] SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat" SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode( _UpperCAmelCase, ) # We changed the bos token self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("./tok" ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
663
0
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the internal state and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
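A quick deterministic check of the class above; the fixed seed of 0 is chosen purely for illustration, and the expected value follows from a single update step:

lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
# (1664525 * 0 + 1013904223) % 2**32 == 1013904223
assert lcg.next_number() == 1013904223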
344
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of a positive integer n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements in a list are equal (an empty list counts as equal)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first such run."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
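The first runs for small n are well documented (14 and 15 for n = 2; 644, 645, 646 for n = 3), so the helpers above can be spot-checked directly:

assert run(2) == [14, 15]          # 14 = 2 * 7, 15 = 3 * 5
assert run(3) == [644, 645, 646]   # each has exactly three distinct prime factors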
663
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( ) ->Optional[Any]: '''simple docstring''' a : int = 0 for i in range(1 , 1001 ): total += i**i return str(SCREAMING_SNAKE_CASE__ )[-10:] if __name__ == "__main__": print(solution())
633
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
663
0
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate the velocity as a fraction of the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor for a given velocity."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector (symbolic by default)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
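A quick numeric sanity check of the helpers above: at half the speed of light, beta is 0.5 and the Lorentz factor is 1/sqrt(1 - 0.25):

v = 0.5 * c
print(beta(v))   # 0.5
print(gamma(v))  # ~1.1547005383792517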
665
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting each char in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced exactly."""
    if N_POPULATION < N_SELECTED:
        raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}")

    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        raise ValueError(f"{not_in_genes_list} is not in genes list, evolution cannot converge")

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
663
0
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __a ( __lowerCamelCase ,unittest.TestCase ): __snake_case : str = CodeGenTokenizer __snake_case : str = CodeGenTokenizerFast __snake_case : Dict = True __snake_case : Dict = {"""add_prefix_space""": True} __snake_case : Optional[Any] = False def A ( self : List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ : Any = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] lowerCAmelCase_ : Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowerCAmelCase_ : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCAmelCase_ : Optional[int] = {"unk_token": "<unk>"} lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_UpperCAmelCase ) ) def A ( self : Optional[int] , **UpperCAmelCase : List[str] ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def A ( self : Tuple , **UpperCAmelCase : str ): kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def A ( self : Tuple , UpperCAmelCase : List[str] ): lowerCAmelCase_ : Optional[int] = "lower newer" lowerCAmelCase_ : Any = "lower newer" return input_text, output_text def A ( self : Tuple ): lowerCAmelCase_ : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase_ : Optional[int] = "lower newer" lowerCAmelCase_ : Union[str, Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] lowerCAmelCase_ : List[str] = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token] lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def A ( self : Any ): if not self.test_rust_tokenizer: return lowerCAmelCase_ : int = self.get_tokenizer() lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) lowerCAmelCase_ : str = "lower newer" # Testing tokenization lowerCAmelCase_ : List[str] = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) lowerCAmelCase_ : str = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids without special tokens lowerCAmelCase_ : str = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) lowerCAmelCase_ : str = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) 
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids with special tokens lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) lowerCAmelCase_ : Tuple = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) lowerCAmelCase_ : List[str] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing the unknown token lowerCAmelCase_ : str = tokens + [rust_tokenizer.unk_token] lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def A ( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ): pass def A ( self : str , UpperCAmelCase : List[Any]=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): lowerCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # Simple input lowerCAmelCase_ : int = "This is a simple input" lowerCAmelCase_ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"] lowerCAmelCase_ : List[Any] = ("This is a simple input", "This is a pair") lowerCAmelCase_ : Any = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Simple input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Simple input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" ) # Pair input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding="""max_length""" , ) def A ( self : Optional[int] ): lowerCAmelCase_ : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" ) # Simple input lowerCAmelCase_ : Dict = "This is a simple input" lowerCAmelCase_ : Optional[int] = ["This is a simple input looooooooong", "This is a simple input"] lowerCAmelCase_ : Optional[Any] = ("This is a simple input", "This is a pair") lowerCAmelCase_ : List[Any] = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] lowerCAmelCase_ : List[str] = tokenizer.pad_token_id lowerCAmelCase_ : Optional[int] = tokenizer(_UpperCAmelCase , padding="""max_length""" , max_length=30 , return_tensors="""np""" ) lowerCAmelCase_ : Union[str, Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="""np""" ) lowerCAmelCase_ : Dict = tokenizer(*_UpperCAmelCase , padding="""max_length""" , max_length=60 , return_tensors="""np""" ) lowerCAmelCase_ : List[Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors="""np""" ) # s # test single string max_length padding 
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s["""input_ids"""] ) self.assertTrue(0 in out_s["""attention_mask"""] ) # s2 # test automatic padding self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] ) self.assertFalse(0 in out_sa["""attention_mask"""][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] ) self.assertTrue(0 in out_sa["""attention_mask"""][1] ) # p # test single pair max_length padding self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p["""input_ids"""] ) self.assertTrue(0 in out_p["""attention_mask"""] ) # p2 # test automatic padding pair self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] ) self.assertFalse(0 in out_pa["""attention_mask"""][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] ) self.assertTrue(0 in out_pa["""attention_mask"""][1] ) def A ( self : Dict ): lowerCAmelCase_ : Optional[Any] = "$$$" lowerCAmelCase_ : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase ) lowerCAmelCase_ : str = "This is a simple input" lowerCAmelCase_ : str = ["This is a simple input 1", "This is a simple input 2"] lowerCAmelCase_ : List[str] = tokenizer.bos_token_id lowerCAmelCase_ : Any = tokenizer(_UpperCAmelCase ) lowerCAmelCase_ : Any = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowerCAmelCase_ : int = tokenizer.decode(out_s.input_ids ) lowerCAmelCase_ : int = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def A ( self : Any ): lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" ) lowerCAmelCase_ : int = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#" lowerCAmelCase_ : Tuple = "\nif len_a > len_b: result = a\nelse: result = b" lowerCAmelCase_ : Dict = tokenizer.encode(_UpperCAmelCase ) lowerCAmelCase_ : List[str] = ["^#", re.escape("""<|endoftext|>""" ), "^'''", "^\"\"\"", "\n\n\n"] lowerCAmelCase_ : int = tokenizer.decode(_UpperCAmelCase , truncate_before_pattern=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def A ( self : Tuple ): pass
600
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Calculate the numeric solution of an ODE at each step using Euler's method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
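A small usage sketch for the reconstructed function above (the name explicit_euler was lost in the source and is restored here): integrating y' = y from y(0) = 1 out to x = 1 should approach e as the step size shrinks:

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # ~2.7048 vs. exp(1) ~2.7183; the gap shrinks with smaller steps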
663
0
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging _lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case__ ( __lowerCamelCase ): '''simple docstring''' def __init__( self : Tuple , lowerCAmelCase_ : CLIPSegForImageSegmentation , lowerCAmelCase_ : CLIPSegProcessor , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : StableDiffusionSafetyChecker , lowerCAmelCase_ : CLIPImageProcessor , ) -> Union[str, Any]: super().__init__() if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1: UpperCAmelCase_ = ( F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`''' F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ''' "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate('''steps_offset!=1''' , '''1.0.0''' , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = 1 UpperCAmelCase_ = FrozenDict(_UpperCAmelCase ) if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False: UpperCAmelCase_ = ( F'''The configuration file of this scheduler: {scheduler} has not set the configuration''' " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) UpperCAmelCase_ = dict(scheduler.config ) UpperCAmelCase_ = True UpperCAmelCase_ = FrozenDict(_UpperCAmelCase ) if safety_checker is None: logger.warning( F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered''' ''' results in services or applications open to the public. Both the diffusers team and Hugging Face''' ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling''' ''' it only for use-cases that involve analyzing network behavior or auditing its results. 
For more''' ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' ) self.register_modules( segmentation_model=_UpperCAmelCase , segmentation_processor=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , ) def UpperCamelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Union[str, int]] = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_UpperCAmelCase ) def UpperCamelCase ( self : List[Any] ) -> int: self.enable_attention_slicing(_UpperCAmelCase ) def UpperCamelCase ( self : Any ) -> List[str]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) UpperCAmelCase_ = torch.device('''cuda''' ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(_UpperCAmelCase , _UpperCAmelCase ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase ( self : str ) -> List[Any]: if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(_UpperCAmelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self : List[Any] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase_ : str , lowerCAmelCase_ : int = 5_12 , lowerCAmelCase_ : int = 5_12 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : float = 7.5 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , **lowerCAmelCase_ : Dict , ) -> List[Any]: UpperCAmelCase_ = self.segmentation_processor( text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device ) UpperCAmelCase_ = self.segmentation_model(**_UpperCAmelCase ) UpperCAmelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() UpperCAmelCase_ = self.numpy_to_pil(_UpperCAmelCase )[0].resize(image.size ) # Run inpainting pipeline with the generated mask UpperCAmelCase_ = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , height=_UpperCAmelCase , width=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , negative_prompt=_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase , 
eta=_UpperCAmelCase , generator=_UpperCAmelCase , latents=_UpperCAmelCase , output_type=_UpperCAmelCase , return_dict=_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=_UpperCAmelCase , )
121
def solution(limit: int = 28123) -> int:
    """Project Euler 23: return the sum of all positive integers that cannot
    be written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)  # proper-divisor sums; 1 divides everything
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res


if __name__ == "__main__":
    print(solution())
663
0
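A minimal invocation sketch for the CLIPSeg-guided inpainting pipeline at the top of the row above. The checkpoint IDs, the community-pipeline name, and the image path are illustrative assumptions; the pipeline's contract (segment the region named by `text`, then inpaint it with `prompt`) is taken from the row itself.

# Hypothetical usage; checkpoint IDs, pipeline name, and image path are assumptions.
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
segmentation_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",  # assumed inpainting checkpoint
    custom_pipeline="text_inpainting",       # assumed community-pipeline name
    segmentation_model=segmentation_model,
    segmentation_processor=segmentation_processor,
).to("cuda")

image = Image.open("still_life.png").convert("RGB").resize((512, 512))
# `text` picks the region to repaint (via CLIPSeg); `prompt` says what to paint there.
result = pipe(text="a glass bottle", prompt="a red vase", image=image).images[0]
result.save("inpainted.png")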
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCamelCase__: Tuple = logging.get_logger(__name__) UpperCamelCase__: Union[str, Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''adapter_layer''': '''encoder.layers.*.adapter_layer''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', '''pooling_layer.linear''': '''projector''', '''pooling_layer.projection''': '''classifier''', } UpperCamelCase__: Union[str, Any] = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''projector''', '''classifier''', ] def snake_case_ ( _lowerCAmelCase : Tuple ) -> Union[str, Any]: UpperCAmelCase : List[Any] = {} with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase : str = line.strip() if line: UpperCAmelCase : str = line.split() UpperCAmelCase : Optional[int] = line_number UpperCAmelCase : str = words[0] UpperCAmelCase : Dict = value return result def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[int]: for attribute in key.split('''.''' ): UpperCAmelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Optional[int] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] UpperCAmelCase : Tuple = "param" if weight_type is not None and weight_type != "param": UpperCAmelCase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase : Dict = hf_pointer for attribute in hf_param_name.split('''.''' ): UpperCAmelCase : Tuple = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Optional[int] = shape_pointer.shape # let's reduce dimension UpperCAmelCase : List[str] = value[0] else: UpperCAmelCase : Optional[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": UpperCAmelCase : List[Any] = value elif weight_type == "weight_v": UpperCAmelCase : str = value elif weight_type == "bias": UpperCAmelCase : str = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): UpperCAmelCase : Any = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : List[Any] = value else: UpperCAmelCase : Optional[int] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> List[Any]: UpperCAmelCase : Dict = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]] UpperCAmelCase : str = "param" if weight_type is not None and weight_type != "param": UpperCAmelCase : Dict = ".".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase : Union[str, Any] = ".".join([key, hf_param_name] ) else: UpperCAmelCase : Dict = key UpperCAmelCase : Optional[Any] = value if "lm_head" in full_key else value[0] UpperCamelCase__: Optional[int] = { '''W_a''': '''linear_1.weight''', '''W_b''': '''linear_2.weight''', '''b_a''': '''linear_1.bias''', '''b_b''': '''linear_2.bias''', '''ln_W''': '''norm.weight''', '''ln_b''': '''norm.bias''', } def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Optional[Any]=None ) -> Any: UpperCAmelCase : Any = False for key, mapped_key in MAPPING.items(): UpperCAmelCase : Dict = "wav2vec2." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: UpperCAmelCase : Dict = True if "*" in mapped_key: UpperCAmelCase : Any = name.split(SCREAMING_SNAKE_CASE__ )[0].split('''.''' )[-2] UpperCAmelCase : int = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase : Tuple = "weight_g" elif "weight_v" in name: UpperCAmelCase : int = "weight_v" elif "bias" in name: UpperCAmelCase : Tuple = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase : Union[str, Any] = "weight" else: UpperCAmelCase : List[str] = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return is_used return is_used def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> int: UpperCAmelCase : List[str] = [] UpperCAmelCase : List[str] = fairseq_model.state_dict() UpperCAmelCase : Tuple = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase : List[Any] = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase : Any = True else: UpperCAmelCase : int = load_wavaveca_layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ) -> Dict: UpperCAmelCase : str = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase : Optional[int] = name.split('''.''' ) UpperCAmelCase : Union[str, Any] = int(items[0] ) UpperCAmelCase : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase : Tuple = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != 
feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase : Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Dict=True , _lowerCAmelCase : List[str]=False ) -> Dict: if config_path is not None: UpperCAmelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase : Optional[int] = WavaVecaConfig() if is_seq_class: UpperCAmelCase : str = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Optional[int] = idalabel UpperCAmelCase : List[str] = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase : Optional[Any] = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase : Optional[int] = target_dict.pad_index UpperCAmelCase : List[Any] = target_dict.bos_index UpperCAmelCase : Tuple = target_dict.eos_index UpperCAmelCase : Tuple = len(target_dict.symbols ) UpperCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Union[str, Any] = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase : List[Any] = 0 UpperCAmelCase : Dict = 1 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : List[Any] = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase : int = True if config.feat_extract_norm == "layer" else False UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase : Any = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Union[str, Any] = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase : Dict = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: UpperCAmelCase : int = argparse.Namespace(task='''audio_pretraining''' ) UpperCAmelCase : Any = 
fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Any = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": UpperCamelCase__: Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) UpperCamelCase__: str = parser.parse_args() UpperCamelCase__: Union[str, Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
127
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCamelCase : Optional[Any] = { '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = ['''MobileViTFeatureExtractor'''] _lowerCamelCase : List[str] = ['''MobileViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[Any] = [ '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileViTForImageClassification''', '''MobileViTForSemanticSegmentation''', '''MobileViTModel''', '''MobileViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Optional[int] = [ '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileViTForImageClassification''', '''TFMobileViTForSemanticSegmentation''', '''TFMobileViTModel''', '''TFMobileViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
663
0
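Once the conversion script in the row above has produced a dump folder, the checkpoint can be loaded back through the standard transformers classes. A short sketch, assuming a fine-tuned CTC model and a made-up dump path:

# Hedged follow-up to the conversion script; the dump path is a placeholder.
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./wav2vec2-base-960h-hf")
model = Wav2Vec2ForCTC.from_pretrained("./wav2vec2-base-960h-hf")

waveform = torch.zeros(16_000)  # one second of silence at the expected 16 kHz rate
inputs = processor(waveform.numpy(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))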
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count the perimeters up to `limit` that admit exactly
    one integer-sided right triangle, generated with Euclid's formula."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        # n < m, opposite parity, coprime -> primitive triple (m^2-n^2, 2mn, m^2+n^2)
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
131
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = BlenderbotSmallConfig UpperCAmelCase_ = {} UpperCAmelCase_ = "gelu" def __init__( self : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[int]=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : str=9_9, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Any=2, _UpperCAmelCase : Any=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=2_0, _UpperCAmelCase : int=2, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : List[str]=0, ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = is_training SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Any = num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE__ : Any = eos_token_id SCREAMING_SNAKE_CASE__ : Optional[int] = pad_token_id SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id def A_ ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) SCREAMING_SNAKE_CASE__ : Dict = tf.concat([input_ids, eos_tensor], axis=1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Any = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) SCREAMING_SNAKE_CASE__ : str = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) return config, inputs_dict def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder() SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["input_ids"] SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids[:1, :] SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict["attention_mask"][:1, :] SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict["head_mask"] SCREAMING_SNAKE_CASE__ : Tuple = 1 # first forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((self.batch_size, 3), config.vocab_size ) SCREAMING_SNAKE_CASE__ : int = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Any = tf.concat([input_ids, next_tokens], axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 ) SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE__ : Tuple = int(ids_tensor((1,), output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 ) def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE__ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) UpperCAmelCase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase_ = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, 
"feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TFBlenderbotSmallModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase ) def A_ ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A_ ( self : Any ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_tokenizers @require_tf class lowerCamelCase (unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] UpperCAmelCase_ = "facebook/blenderbot_small-90M" @cached_property def A_ ( self : Dict ) -> Optional[Any]: """simple docstring""" # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def A_ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A_ ( self : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, return_tensors="tf" ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
663
0
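A brute-force cross-check for the Euclid-formula `solution` in the row above (an added example, not row data): up to a perimeter of 48, the singular perimeters are 12, 24, 30, 36, 40 and 48, so both counters should return 6.

# Added sanity check: count singular perimeters by brute force and compare.
def brute_force(limit: int) -> int:
    singular = 0
    for p in range(1, limit + 1):
        ways = sum(
            1
            for a in range(1, p // 3)
            for b in range(a, (p - a) // 2 + 1)
            if a * a + b * b == (p - a - b) ** 2
        )
        singular += ways == 1
    return singular


assert brute_force(48) == solution(48) == 6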
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = '''▁''' __SCREAMING_SNAKE_CASE = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} __SCREAMING_SNAKE_CASE = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } __SCREAMING_SNAKE_CASE = {'''vinai/bartpho-syllable''': 1_0_2_4} class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["input_ids", "attention_mask"] def __init__( self : int , A__ : Dict , A__ : Tuple , A__ : Any="<s>" , A__ : List[str]="</s>" , A__ : List[str]="</s>" , A__ : List[Any]="<s>" , A__ : Dict="<unk>" , A__ : Tuple="<pad>" , A__ : int="<mask>" , A__ : Optional[Dict[str, Any]] = None , **A__ : Any , ) -> None: '''simple docstring''' a__ : Any = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token a__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) a__ : Union[str, Any] = vocab_file a__ : Optional[int] = monolingual_vocab_file a__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility a__ : List[Any] = {} a__ : Optional[int] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids: a__ : Dict = cnt cnt += 1 with open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): a__ : int = line.strip().split()[0] a__ : Tuple = len(self.fairseq_tokens_to_ids ) if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids: a__ : List[Any] = len(self.fairseq_tokens_to_ids ) a__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> int: '''simple docstring''' a__ : List[Any] = self.__dict__.copy() a__ : Any = None a__ : int = self.sp_model.serialized_model_proto() return state def __setstate__( self : int , A__ : Optional[int] ) -> int: '''simple docstring''' a__ : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a__ : List[Any] = {} a__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self : Optional[int] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a__ : Any = [self.cls_token_id] a__ : List[Any] = [self.sep_token_id] return cls + 
token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self : List[str] , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def __lowerCAmelCase ( self : Optional[int] , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' a__ : Union[str, Any] = [self.sep_token_id] a__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCAmelCase ( self : Any ) -> List[str]: '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __lowerCAmelCase ( self : Tuple ) -> str: '''simple docstring''' a__ : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self : Tuple , A__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) def __lowerCAmelCase ( self : List[str] , A__ : Union[str, Any] ) -> Tuple: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __lowerCAmelCase ( self : List[str] , A__ : str ) -> str: '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __lowerCAmelCase ( self : Optional[Any] , A__ : List[str] ) -> Optional[int]: '''simple docstring''' a__ : Optional[int] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase , ''' ''' ).strip() return out_string def __lowerCAmelCase ( self : Tuple , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(_UpperCAmelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return a__ : Optional[int] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) a__ : List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , '''wb''' ) as fi: a__ : int = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( _UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F'{str(_UpperCAmelCase )} \n' ) return out_vocab_file, out_monolingual_vocab_file
688
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = (DPMSolverSDEScheduler,) UpperCAmelCase_ = 10 def A_ ( self : List[str], **_UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "noise_sampler_seed": 0, } config.update(**_UpperCAmelCase ) return config def A_ ( self : Tuple ) -> int: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def A_ ( self : int ) -> int: """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase, beta_end=_UpperCAmelCase ) def A_ ( self : List[Any] ) -> List[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def A_ ( self : Optional[int] ) -> int: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def A_ ( self : Optional[int] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : int = self.dummy_model() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : Dict = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def A_ ( self : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config(prediction_type="v_prediction" ) SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) 
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3 def A_ ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def A_ ( self : str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**_UpperCAmelCase, use_karras_sigmas=_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = self.dummy_model() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 elif torch_device in ["cuda"]: assert 
abs(result_sum.item() - 177.63653564453125 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
663
0
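A standalone denoising loop with `DPMSolverSDEScheduler`, using the same configuration values as the test in the row above (requires `torchsde`; the random tensor stands in for a real UNet forward pass):

# Minimal scheduler loop mirroring the test; random noise replaces a UNet.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(
    num_train_timesteps=1100,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    noise_sampler_seed=0,
)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # placeholder model output
    sample = scheduler.step(noise_pred, t, sample).prev_sample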
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase__ = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''EncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''TFEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''FlaxEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
486
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = ["image_processor", "tokenizer"] UpperCAmelCase_ = "AutoImageProcessor" UpperCAmelCase_ = "AutoTokenizer" def __init__( self : Tuple, _UpperCAmelCase : str=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", _UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : str = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE__ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : int = self.image_processor SCREAMING_SNAKE_CASE__ : Any = False def __call__( self : List[str], *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("images", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("text", _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : Optional[int] = args[0] SCREAMING_SNAKE_CASE__ : str = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase ) if text is not None: SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE__ : Optional[int] = encodings["input_ids"] return inputs def A_ ( self : Dict, *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase ) def A_ ( self : List[str], *_UpperCAmelCase : int, **_UpperCAmelCase : Dict ) -> Any: """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase ) @contextmanager def A_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." 
) SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer yield SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self : Tuple, _UpperCAmelCase : List[Any], _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[Any]=None ) -> Any: """simple docstring""" if added_vocab is None: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.get_added_vocab() SCREAMING_SNAKE_CASE__ : str = {} while tokens: SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE ) if start_token is None: break SCREAMING_SNAKE_CASE__ : Any = start_token.group(1 ) SCREAMING_SNAKE_CASE__ : Dict = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE ) SCREAMING_SNAKE_CASE__ : Any = start_token.group() if end_token is None: SCREAMING_SNAKE_CASE__ : List[str] = tokens.replace(_UpperCAmelCase, "" ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = end_token.group() SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = re.escape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE ) if content is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node SCREAMING_SNAKE_CASE__ : str = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase ) if value: if len(_UpperCAmelCase ) == 1: SCREAMING_SNAKE_CASE__ : str = value[0] SCREAMING_SNAKE_CASE__ : List[str] = value else: # leaf nodes SCREAMING_SNAKE_CASE__ : Optional[int] = [] for leaf in content.split(r"<sep/>" ): SCREAMING_SNAKE_CASE__ : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": SCREAMING_SNAKE_CASE__ : str = leaf[1:-2] # for categorical special tokens output[key].append(_UpperCAmelCase ) if len(output[key] ) == 1: SCREAMING_SNAKE_CASE__ : str = output[key][0] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase ) if len(_UpperCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def A_ ( self : str ) -> Optional[Any]: """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, ) return self.image_processor_class @property def A_ ( self : int ) -> List[str]: """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, ) return self.image_processor
663
0
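The lazy-import table in the row above ultimately exposes `EncoderDecoderModel`. A hedged sketch of composing one from two pretrained checkpoints (the checkpoint name is a common default, assumed here):

# Illustrative composition of an encoder-decoder model; checkpoint is assumed.
from transformers import AutoTokenizer, EncoderDecoderModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = EncoderDecoderModel.from_encoder_decoder_pretrained(
    "bert-base-uncased", "bert-base-uncased"
)
# generate() needs these set when the decoder config does not provide them.
model.config.decoder_start_token_id = tokenizer.cls_token_id
model.config.pad_token_id = tokenizer.pad_token_id

inputs = tokenizer("An example sentence to round-trip.", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))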
lowerCAmelCase = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' lowerCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] lowerCAmelCase = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
462
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCamelCase : List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = ['''BartphoTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
663
0
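The `_import_structure` / `_LazyModule` pattern in the row above defers submodule imports until an exported name is first touched. A simplified, self-contained sketch of the mechanism (an illustration, not transformers' actual class):

# Simplified lazy-module mechanism; assumes the listed submodules exist.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value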
import math from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''', # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class _lowerCamelCase ( __lowerCamelCase ): """simple docstring""" snake_case = "data2vec-audio" def __init__( self , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) , _SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , _SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=19 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=0.0_5 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="sum" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1500) , _SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) , _SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , )->List[str]: '''simple docstring''' super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase ) A_ : Optional[Any] = hidden_size A_ : Optional[int] = feat_extract_activation A_ : str = list(_UpperCAmelCase ) A_ : Union[str, Any] = list(_UpperCAmelCase ) A_ : str = list(_UpperCAmelCase ) A_ : Tuple = conv_bias A_ : Optional[Any] = num_conv_pos_embeddings A_ : Optional[int] = num_conv_pos_embedding_groups A_ : Optional[int] = conv_pos_kernel_size A_ : Optional[Any] = len(self.conv_dim ) A_ : List[str] = num_hidden_layers A_ : List[Any] = intermediate_size A_ : Tuple = hidden_act A_ : int = num_attention_heads A_ : str = hidden_dropout A_ : Optional[int] = attention_dropout A_ : List[str] = activation_dropout A_ : Optional[Any] = feat_proj_dropout A_ : List[str] = final_dropout A_ : Any = layerdrop A_ : Tuple = layer_norm_eps A_ : Optional[int] = initializer_range A_ : Optional[Any] = vocab_size A_ : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : Union[str, Any] = mask_time_prob A_ : Tuple = mask_time_length A_ : Optional[int] = mask_time_min_masks A_ : Union[str, Any] = mask_feature_prob A_ : int = mask_feature_length A_ : List[str] = mask_feature_min_masks # ctc loss A_ : List[str] = ctc_loss_reduction A_ : Tuple = ctc_zero_infinity # adapter A_ : Dict = add_adapter A_ : List[Any] = adapter_kernel_size A_ : Any = adapter_stride A_ : Optional[Any] = num_adapter_layers A_ : Tuple = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A_ : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A_ : Optional[int] = list(_UpperCAmelCase ) A_ : Any = list(_UpperCAmelCase ) A_ : Dict = list(_UpperCAmelCase ) A_ : Dict = xvector_output_dim @property def _snake_case ( self )->int: '''simple docstring''' return math.prod(self.conv_stride )
590
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : """simple docstring""" def __init__( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=3, _UpperCAmelCase : str=1_6, _UpperCAmelCase : Tuple=[1, 2, 1], _UpperCAmelCase : List[str]=[2, 2, 4], _UpperCAmelCase : Tuple=2, _UpperCAmelCase : str=2.0, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Any=False, _UpperCAmelCase : Any=True, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Any=1E-5, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : str=True, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : List[str]=8, _UpperCAmelCase : Union[str, Any]=["stage1", "stage2", "stage3"], _UpperCAmelCase : Any=[1, 2, 3], ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = image_size SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : Optional[int] = embed_dim SCREAMING_SNAKE_CASE__ : List[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = num_heads SCREAMING_SNAKE_CASE__ : str = window_size SCREAMING_SNAKE_CASE__ : Any = mlp_ratio SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Tuple = drop_path_rate SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = use_absolute_embeddings SCREAMING_SNAKE_CASE__ : Tuple = patch_norm SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = is_training SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_stride SCREAMING_SNAKE_CASE__ : List[Any] = out_features SCREAMING_SNAKE_CASE__ : Dict = out_indices def A_ ( self : List[str] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : str = None if self.use_labels: SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config() return config, pixel_values, labels def A_ ( self : Optional[int] ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size, patch_size=self.patch_size, 
num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def A_ ( self : Dict, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) ) def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskFormerSwinBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(_UpperCAmelCase ): SCREAMING_SNAKE_CASE__ : List[Any] = ["stem"] SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_UpperCAmelCase ) def A_ ( self : str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) UpperCAmelCase_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = MaskFormerSwinModelTester(self ) SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self, config_class=_UpperCAmelCase, embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def A_ ( self : Any ) -> List[Any]: """simple docstring""" pass def A_ ( self : Tuple ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self : int ) -> Optional[Any]: """simple docstring""" return def A_ ( self : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def A_ ( self : List[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) @unittest.skip("Swin does not use inputs_embeds" ) def A_ ( self : Any ) -> Optional[int]: """simple docstring""" pass @unittest.skip("Swin does not support feedforward chunking" ) def A_ ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def A_ ( self : Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) ) def A_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1], _UpperCAmelCase ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def A_ ( self : Dict ) -> List[str]: """simple docstring""" pass def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase ) # Swin has a different seq_length SCREAMING_SNAKE_CASE__ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], ) def A_ ( self : 
Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) def A_ ( self : Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[int] = 3 SCREAMING_SNAKE_CASE__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) SCREAMING_SNAKE_CASE__ : str = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Any = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def A_ ( self : List[Any] ) -> Dict: """simple docstring""" pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def A_ ( self : Dict ) -> str: """simple docstring""" pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def A_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" pass def A_ ( self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_UpperCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE__ : Dict = 0 return t def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ): with torch.no_grad(): SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple() def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ): if isinstance(_UpperCAmelCase, (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ): recursive_check(_UpperCAmelCase, _UpperCAmelCase ) elif isinstance(_UpperCAmelCase, _UpperCAmelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): 
recursive_check(_UpperCAmelCase, _UpperCAmelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=( "Tuple and dict output are not equal. Difference:" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has''' F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.''' ), ) recursive_check(_UpperCAmelCase, _UpperCAmelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} ) @require_torch class lowerCamelCase (unittest.TestCase , __lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else () UpperCAmelCase_ = MaskFormerSwinConfig def A_ ( self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModelTester(self ) def A_ ( self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Any = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str = backbone_class(_UpperCAmelCase ) backbone.to(_UpperCAmelCase ) backbone.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ): self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:], 
backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) h_batch_size, _, h_n_channels = hidden_state.shape self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: SCREAMING_SNAKE_CASE__ : int = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase ) self.assertIsNotNone(outputs.attentions )
663
0
def _print_dist(dist: list, v: int) -> None:
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph: list, v: int):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are vertex indices that must be within range(v);
    # out-of-range input will raise an IndexError
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# Expected Output from the vertex, edge and src, dst, weight inputs
# 0 INF INF
# INF 0 2
# INF 1 0
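A minimal non-interactive sketch of the routine above, wired to the 3-vertex example in its trailing comments (no new behavior assumed):

# Sketch: the 3-vertex / 2-edge example from the comments, without input().
INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],  # Edge 1: 1 -> 2, weight 2
    [INF, 1.0, 0.0],  # Edge 2: 2 -> 1, weight 1
]
dist, _ = floyd_warshall(example_graph, 3)  # also prints the matrix as a side effect
assert dist[1][2] == 2.0 and dist[2][1] == 1.0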
344
from ....configuration_utils import PretrainedConfig from ....utils import logging _lowerCamelCase : Any = logging.get_logger(__name__) # TODO: upload to AWS _lowerCamelCase : str = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = "retribert" def __init__( self : Optional[Any], _UpperCAmelCase : Dict=3_0_5_2_2, _UpperCAmelCase : List[str]=7_6_8, _UpperCAmelCase : Tuple=8, _UpperCAmelCase : Optional[Any]=1_2, _UpperCAmelCase : Union[str, Any]=3_0_7_2, _UpperCAmelCase : Dict="gelu", _UpperCAmelCase : Tuple=0.1, _UpperCAmelCase : str=0.1, _UpperCAmelCase : List[str]=5_1_2, _UpperCAmelCase : Optional[int]=2, _UpperCAmelCase : Dict=0.02, _UpperCAmelCase : Any=1E-12, _UpperCAmelCase : Dict=True, _UpperCAmelCase : Any=1_2_8, _UpperCAmelCase : int=0, **_UpperCAmelCase : List[str], ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = vocab_size SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = hidden_act SCREAMING_SNAKE_CASE__ : int = intermediate_size SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : List[str] = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[Any] = share_encoders SCREAMING_SNAKE_CASE__ : int = projection_dim
663
0
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __UpperCamelCase ( unittest.TestCase ): @property def __a ( self ) -> Dict: torch.manual_seed(0 ) a : Union[str, Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model @property def __a ( self ) -> List[Any]: torch.manual_seed(0 ) a : Dict = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , ) return model @property def __a ( self ) -> Any: torch.manual_seed(0 ) a : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_UpperCAmelCase ) def __a ( self ) -> Any: a : Union[str, Any] = self.dummy_uncond_unet a : Dict = DDIMScheduler() a : Dict = self.dummy_vq_model a : int = LDMPipeline(unet=_UpperCAmelCase , vqvae=_UpperCAmelCase , scheduler=_UpperCAmelCase ) ldm.to(_UpperCAmelCase ) ldm.set_progress_bar_config(disable=_UpperCAmelCase ) a : Optional[Any] = torch.manual_seed(0 ) a : List[Any] = ldm(generator=_UpperCAmelCase , num_inference_steps=2 , output_type="numpy" ).images a : str = torch.manual_seed(0 ) a : str = ldm(generator=_UpperCAmelCase , num_inference_steps=2 , output_type="numpy" , return_dict=_UpperCAmelCase )[0] a : Union[str, Any] = image[0, -3:, -3:, -1] a : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a : List[Any] = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] ) a : Union[str, Any] = 1E-2 if torch_device != "mps" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> str: a : int = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" ) ldm.to(_UpperCAmelCase ) ldm.set_progress_bar_config(disable=_UpperCAmelCase ) a : int = torch.manual_seed(0 ) a : Dict = ldm(generator=_UpperCAmelCase , num_inference_steps=5 , output_type="numpy" ).images a : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) a : List[str] = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] ) a : List[str] = 1E-2 if torch_device != "mps" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
633
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCamelCase : int = False @skip_mps class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline UpperCAmelCase_ = False UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} ) UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def A_ ( cls : str ) -> Union[str, Any]: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def A_ ( cls : Tuple ) -> str: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def A_ ( self : Any ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4), layers_per_block=1, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, attention_head_dim=(2, 4), use_linear_projection=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=_UpperCAmelCase, set_alpha_to_one=_UpperCAmelCase, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL( block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_2_8, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act="gelu", projection_dim=5_1_2, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextModel(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def A_ ( self : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any=0 ) -> Optional[Any]: """simple docstring""" if str(_UpperCAmelCase ).startswith("mps" ): SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_UpperCAmelCase ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = { "prompt": "a cat and a frog", "token_indices": [2, 5], "generator": generator, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", "max_iter_to_alter": 2, "thresholds": {0: 0.7}, } return inputs def A_ ( self : 
Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "cpu" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = pipe(**_UpperCAmelCase ).images SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 6_4, 6_4, 3) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) SCREAMING_SNAKE_CASE__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_UpperCAmelCase, 1E-3 ) def A_ ( self : str ) -> List[Any]: """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def A_ ( self : Any ) -> str: """simple docstring""" # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self : Optional[Any] ) -> str: """simple docstring""" self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 ) def A_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def A_ ( self : Any ) -> List[str]: """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class lowerCamelCase (unittest.TestCase ): """simple docstring""" @classmethod def A_ ( cls : Union[str, Any] ) -> Tuple: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def A_ ( cls : List[str] ) -> List[str]: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def A_ ( self : str ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(5_1 ) SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=_UpperCAmelCase, torch_dtype=torch.floataa ) pipe.to("cuda" ) SCREAMING_SNAKE_CASE__ : List[str] = "a painting of an elephant with glasses" SCREAMING_SNAKE_CASE__ : Optional[int] = [5, 7] SCREAMING_SNAKE_CASE__ : str = pipe( prompt=_UpperCAmelCase, token_indices=_UpperCAmelCase, guidance_scale=7.5, generator=_UpperCAmelCase, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0] SCREAMING_SNAKE_CASE__ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" ) assert np.abs((expected_image - image).max() ) < 5E-1
663
0
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    """
    Interpolate and evaluate a polynomial at ``xa`` with Neville's method.

    Returns ``[value, table]``, where ``table`` is the full Neville table.

    >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
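A short usage sketch for the function above (the name ``neville_interpolate`` comes from this reconstruction); the points lie on y = 2x, so the interpolant at x = 5 is exactly 10:

value, _table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 5)
assert abs(value - 10.0) < 1e-9  # linear data -> the interpolant is exact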
665
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = PegasusConfig UpperCAmelCase_ = {} UpperCAmelCase_ = "gelu" def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=False, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=4, _UpperCAmelCase : str=3_7, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=4_0, _UpperCAmelCase : Any=2, _UpperCAmelCase : int=1, _UpperCAmelCase : str=0, ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = seq_length SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : Tuple = vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : str = eos_token_id SCREAMING_SNAKE_CASE__ : Dict = pad_token_id SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id def A_ ( self : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1 ) SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) SCREAMING_SNAKE_CASE__ : List[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) return config, inputs_dict def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 
TFPegasusModel(config=_UpperCAmelCase ).get_decoder() SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["input_ids"] SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :] SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["attention_mask"][:1, :] SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["head_mask"] SCREAMING_SNAKE_CASE__ : int = 1 # first forward pass SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3), config.vocab_size ) SCREAMING_SNAKE_CASE__ : str = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 ) def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Any: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase_ = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": 
TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : List[str] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = TFPegasusModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=_UpperCAmelCase ) def A_ ( self : Optional[Any] ) -> int: """simple docstring""" self.config_tester.run_common_tests() def A_ ( self : str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class lowerCamelCase (unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" ", ] UpperCAmelCase_ = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCAmelCase_ = "google/pegasus-xsum" @cached_property def A_ ( self : Union[str, Any] ) -> int: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def A_ ( self : str, **_UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.translate_src_text(**_UpperCAmelCase ) assert self.expected_text == generated_words def A_ ( self : Any, **_UpperCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, **_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="tf" ) SCREAMING_SNAKE_CASE__ : List[str] = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def A_ ( self : List[Any] ) -> Any: """simple docstring""" self._assert_generated_batch_equal_expected()
663
0
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class __a ( unittest.TestCase ): @property def A ( self : List[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def A ( self : Union[str, Any] ): lowerCAmelCase_ : int = ort.SessionOptions() lowerCAmelCase_ : Any = False return options def A ( self : Union[str, Any] ): lowerCAmelCase_ : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) lowerCAmelCase_ : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) lowerCAmelCase_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default lowerCAmelCase_ : Tuple = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) lowerCAmelCase_ : List[str] = "A red cat sitting on a park bench" lowerCAmelCase_ : Optional[int] = np.random.RandomState(0 ) lowerCAmelCase_ : Tuple = pipe( prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=_UpperCAmelCase , output_type="""np""" , ) lowerCAmelCase_ : Union[str, Any] = output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-2
600
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowerCamelCase : List[str] = { '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''], '''tokenization_tapas''': ['''TapasTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TapasForMaskedLM''', '''TapasForQuestionAnswering''', '''TapasForSequenceClassification''', '''TapasModel''', '''TapasPreTrainedModel''', '''load_tf_weights_in_tapas''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFTapasForMaskedLM''', '''TFTapasForQuestionAnswering''', '''TFTapasForSequenceClassification''', '''TFTapasModel''', '''TFTapasPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
663
0
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed-capacity doubly linked list.

    Reconstructed from an obfuscated dump: ``create_linked_list``,
    ``is_empty``, ``check_can_perform_operation`` and ``check_is_full`` are
    named in the original call sites; ``first``, ``enqueue`` and ``dequeue``
    are descriptive names chosen for the remaining methods.
    """

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
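A usage sketch for the queue above (method names per the reconstruction; capacity 2 makes the overflow guard easy to hit):

queue = CircularQueueLinkedList(initial_capacity=2)
queue.enqueue("a")
queue.enqueue("b")
assert queue.first() == "a"
try:
    queue.enqueue("c")  # ring is full: rear.next is already front
except Exception as err:
    assert str(err) == "Full Queue"
assert queue.dequeue() == "a"
assert queue.dequeue() == "b"
assert queue.is_empty()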
121
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip''' def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our RoBERTa config:" , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE__ ) model.eval() # Now let's copy all the weights. # Embeddings SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : str = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
SCREAMING_SNAKE_CASE__ : Any = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.bias # this one is final layer norm SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : str = roberta.model.classification_heads["mnli"].dense.weight SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"].dense.bias SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["mnli"].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"](roberta.extract_features(SCREAMING_SNAKE_CASE__ ) ) else: SCREAMING_SNAKE_CASE__ : int = roberta.model(SCREAMING_SNAKE_CASE__ )[0] print(our_output.shape , their_output.shape ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : int = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) print("Do both models output the same tensors?" , "🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) pathlib.Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": _lowerCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) _lowerCamelCase : Any = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
663
0
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class SCREAMING_SNAKE_CASE( __lowerCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase__ = WavaVecaPhonemeCTCTokenizer lowerCamelCase__ = False def A ( self : int ) -> Dict: super().setUp() UpperCAmelCase : List[Any] = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ " "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(''' ''' ) UpperCAmelCase : Optional[Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) UpperCAmelCase : Union[str, Any] = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' ) def A ( self : Dict , __snake_case : Any , __snake_case : Any=False , __snake_case : Any=20 , __snake_case : Union[str, Any]=5 ) -> Tuple[str, list]: UpperCAmelCase : Optional[Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )) for i in range(len(_UpperCAmelCase ) )] UpperCAmelCase : Dict = list(filter(lambda __snake_case : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_UpperCAmelCase ) , _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: UpperCAmelCase : Optional[int] = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: UpperCAmelCase : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase : Optional[int] = [t[0] for t in toks] # Ensure consistency UpperCAmelCase : Optional[int] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: UpperCAmelCase : Optional[int] = ( tokenizer.decode([toks_ids[0]] , 
clean_up_tokenization_spaces=_UpperCAmelCase ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: UpperCAmelCase : Any = " " + output_txt UpperCAmelCase : Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def A ( self : List[Any] , **__snake_case : Union[str, Any] ) -> Tuple: kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def A ( self : Optional[int] ) -> Dict: UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) UpperCAmelCase : List[Any] = tokenizer('''m xxx ɪ''' , do_phonemize=_UpperCAmelCase ).input_ids self.assertEqual(_UpperCAmelCase , [13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) UpperCAmelCase : List[Any] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=_UpperCAmelCase ).input_ids self.assertEqual(_UpperCAmelCase , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa UpperCAmelCase : Dict = tokenizer('''maɪ c''' , do_phonemize=_UpperCAmelCase ).input_ids self.assertEqual(_UpperCAmelCase , [3, 200] ) # mai should be <unk> (=3) def A ( self : str ) -> int: UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) UpperCAmelCase : int = "Hello how are you" UpperCAmelCase : Optional[Any] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(_UpperCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def A ( self : Tuple ) -> Union[str, Any]: UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) UpperCAmelCase : List[Any] = "Hello how are you" UpperCAmelCase : int = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(_UpperCAmelCase ).input_ids , tokenizer(_UpperCAmelCase , do_phonemize=_UpperCAmelCase ).input_ids ) def A ( self : Optional[Any] ) -> int: UpperCAmelCase : str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) UpperCAmelCase : Tuple = "Hello how are you" UpperCAmelCase : List[Any] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''' ) UpperCAmelCase : List[Any] = tokenizer.decode(tokenizer(_UpperCAmelCase ).input_ids ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def A ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) UpperCAmelCase : List[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] UpperCAmelCase : List[str] = tokenizer.decode(sample_ids[0] ) UpperCAmelCase : List[str] = tokenizer.batch_decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , batch_tokens[0] ) self.assertEqual(_UpperCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def A ( self : Union[str, Any] ) -> List[Any]: UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) UpperCAmelCase : Optional[int] = "Hello how are you" UpperCAmelCase : Optional[Any] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(_UpperCAmelCase , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def A ( self : str ) -> int: 
UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) UpperCAmelCase : Optional[int] = "Hello how are you" UpperCAmelCase : Optional[int] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(_UpperCAmelCase ).input_ids , tokenizer(_UpperCAmelCase , do_phonemize=_UpperCAmelCase ).input_ids ) def A ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase : str = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off UpperCAmelCase : Dict = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter UpperCAmelCase : Union[str, Any] = tokenizer.decode(sample_ids[0] ) UpperCAmelCase : Any = tokenizer.batch_decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , batch_tokens[0] ) self.assertEqual(_UpperCAmelCase , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter UpperCAmelCase : Any = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_UpperCAmelCase ) UpperCAmelCase : str = tokenizer.batch_decode(_UpperCAmelCase , filter_word_delimiter_token=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , batch_tokens[0] ) self.assertEqual(_UpperCAmelCase , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def A ( self : Any ) -> Tuple: UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) UpperCAmelCase : Optional[int] = "Hello how are you" UpperCAmelCase : Optional[int] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''' ) UpperCAmelCase : List[str] = tokenizer.decode(tokenizer(_UpperCAmelCase ).input_ids , filter_word_delimiter_token=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def A ( self : Any ) -> Optional[int]: UpperCAmelCase : List[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) UpperCAmelCase : str = "Hello how are you" UpperCAmelCase : Optional[Any] = tokenizer.phonemize(_UpperCAmelCase , phonemizer_lang='''en-us''' ) UpperCAmelCase : List[str] = tokenizer.decode(tokenizer(_UpperCAmelCase ).input_ids , filter_word_delimiter_token=_UpperCAmelCase ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , _UpperCAmelCase ) def A ( self : Tuple ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=_UpperCAmelCase ) UpperCAmelCase : str = "Hello how are you" UpperCAmelCase : Dict = tokenizer(_UpperCAmelCase , phonemizer_lang='''en-us''' ).input_ids UpperCAmelCase : Tuple = tokenizer(_UpperCAmelCase , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase : Dict = tokenizer.decode(_UpperCAmelCase ) UpperCAmelCase : Optional[int] = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(_UpperCAmelCase , '''ɛ l o h aʊ a ʁ j u''' ) def A ( self : Tuple ) -> Any: UpperCAmelCase : 
Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) UpperCAmelCase : Tuple = "Hello how Are you" UpperCAmelCase : Any = "hello how are you" UpperCAmelCase : Dict = tokenizer(_UpperCAmelCase ).input_ids UpperCAmelCase : Optional[int] = tokenizer(_UpperCAmelCase ).input_ids self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def A ( self : List[str] ) -> Any: UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off UpperCAmelCase : List[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on UpperCAmelCase : Any = tokenizer.batch_decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def A ( __snake_case : Any , __snake_case : Tuple ) -> Tuple: UpperCAmelCase : Tuple = [d[key] for d in offsets] return retrieved_list def A ( self : Any ) -> Any: UpperCAmelCase : List[Any] = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" UpperCAmelCase : Any = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on UpperCAmelCase : List[Any] = tokenizer.decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase , filter_word_delimiter_token=_UpperCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def A ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase : Dict = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(__snake_case : int , __snake_case : Optional[Any] ): self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(isinstance(outputs_list[0] , _UpperCAmelCase ) ) # transform list to ModelOutput UpperCAmelCase : int = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(__snake_case : Optional[Any] , __snake_case : Dict ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): [recursive_check(_UpperCAmelCase , _UpperCAmelCase ) for 
la, la in zip(_UpperCAmelCase , _UpperCAmelCase )] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off UpperCAmelCase : List[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char UpperCAmelCase : Optional[int] = tokenizer.batch_decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase ) UpperCAmelCase : Optional[Any] = [tokenizer.decode(_UpperCAmelCase , output_char_offsets=_UpperCAmelCase ) for ids in sample_ids] check_list_tuples_equal(_UpperCAmelCase , _UpperCAmelCase ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def A ( self : List[str] ) -> int: pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def A ( self : Union[str, Any] ) -> List[str]: pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def A ( self : Union[str, Any] ) -> int: pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def A ( self : int ) -> str: pass def A ( self : int ) -> Dict: UpperCAmelCase : int = self.get_tokenizers(do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase : Optional[Any] = tokenizer.vocab_size UpperCAmelCase : List[str] = len(_UpperCAmelCase ) self.assertNotEqual(_UpperCAmelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) UpperCAmelCase : Tuple = ["aaaaa bbbbbb", "cccccccccdddddddd"] UpperCAmelCase : Any = tokenizer.add_tokens(_UpperCAmelCase ) UpperCAmelCase : Tuple = tokenizer.vocab_size UpperCAmelCase : Dict = len(_UpperCAmelCase ) self.assertNotEqual(_UpperCAmelCase , 0 ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase ) ) UpperCAmelCase : List[str] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_UpperCAmelCase ) self.assertGreaterEqual(len(_UpperCAmelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) UpperCAmelCase : Optional[int] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} UpperCAmelCase : Tuple = tokenizer.add_special_tokens(_UpperCAmelCase ) UpperCAmelCase : List[str] = tokenizer.vocab_size UpperCAmelCase : str = len(_UpperCAmelCase ) self.assertNotEqual(_UpperCAmelCase , 0 ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase ) ) UpperCAmelCase : Optional[int] = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_UpperCAmelCase ) self.assertGreaterEqual(len(_UpperCAmelCase ) , 6 
) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def A ( self : Tuple ) -> Union[str, Any]: pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def A ( self : Tuple ) -> Dict: pass def A ( self : List[str] ) -> Any: UpperCAmelCase : Any = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase : List[str] = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(output['''text'''] , _UpperCAmelCase )
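The char-offset test above hinges on CTC-style grouping of repeated ids. A self-contained sketch of that grouping (an illustrative re-implementation, not the tokenizer's actual code) reproduces the offsets asserted in the test:

def ctc_group_offsets(ids, pad_token_id):
    # Collapse runs of identical ids into (id, start, end) triples,
    # dropping pad tokens, exactly as the asserted offsets expect.
    groups = []
    i = 0
    while i < len(ids):
        j = i
        while j < len(ids) and ids[j] == ids[i]:
            j += 1
        if ids[i] != pad_token_id:
            groups.append((ids[i], i, j))
        i = j
    return groups

# For [11, 5, 5, 5, 15, 15, <pad>, 15, 15, <wd>, <pad>, 15, 8, 8, 8, <wd>, 98]
# this yields start offsets [0, 1, 4, 7, 9, 11, 12, 15, 16], matching the test.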
127
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = "Wav2Vec2FeatureExtractor" UpperCAmelCase_ = "AutoTokenizer" def __init__( self : Tuple, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> List[str]: """simple docstring""" super().__init__(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor SCREAMING_SNAKE_CASE__ : Union[str, Any] = False @classmethod def A_ ( cls : int, _UpperCAmelCase : Dict, **_UpperCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" try: return super().from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) except OSError: warnings.warn( F'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: ", _UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) return cls(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase ) def __call__( self : Optional[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : str ) -> Optional[Any]: """simple docstring""" # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("raw_speech" ) else: SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("audio", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("sampling_rate", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("text", _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0] SCREAMING_SNAKE_CASE__ : Tuple = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if audio is not None: SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_UpperCAmelCase, *_UpperCAmelCase, sampling_rate=_UpperCAmelCase, **_UpperCAmelCase ) if text is not None: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: SCREAMING_SNAKE_CASE__ : List[str] = encodings["input_ids"] return inputs def A_ ( self : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("input_features", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("labels", _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0] SCREAMING_SNAKE_CASE__ : Dict = args[1:] if input_features is not None: SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor.pad(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase ) if labels is not None: SCREAMING_SNAKE_CASE__ : str = self.tokenizer.pad(_UpperCAmelCase, **_UpperCAmelCase ) if labels is None: return input_features elif input_features is None: return labels else: SCREAMING_SNAKE_CASE__ : List[str] = labels["input_ids"] return input_features def A_ ( self : Union[str, Any], *_UpperCAmelCase : str, **_UpperCAmelCase : Any ) -> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase ) def A_ ( self : Optional[int], *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase ) @contextmanager def A_ ( self : Optional[int] ) -> Any: """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) SCREAMING_SNAKE_CASE__ : Dict = True SCREAMING_SNAKE_CASE__ : int = self.tokenizer yield SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor SCREAMING_SNAKE_CASE__ : Optional[Any] = False
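A sketch of driving the processor defined above; the class ships in transformers as Wav2Vec2Processor, and the checkpoint name and silent audio array are illustrative.

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
raw_audio = np.zeros(16_000, dtype=np.float32)                       # one second of silence
inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids                     # replaces the deprecated as_target_processor path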
663
0
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # Sum all parameters, excluding the encoder embeddings.
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
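A usage sketch mirroring the __main__ block above; both paths are hypothetical placeholders, and it assumes the dall_e package is installed.

convert_dalle_checkpoint(
    "checkpoints/dalle_encoder.pkl",    # hypothetical --checkpoint_path (local file or URL)
    "converted/flava-image-codebook",   # hypothetical --pytorch_dump_folder_path
)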
131
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
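Once the lazy module above is wired in, the public classes import as usual; a minimal sketch (instantiating the default config builds large, randomly initialised weights):

from transformers import XLMRobertaXLConfig, XLMRobertaXLModel

config = XLMRobertaXLConfig()      # default hyperparameters
model = XLMRobertaXLModel(config)  # randomly initialised weights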
663
0
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Any ) -> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCAmelCase ( self : Tuple ) -> int: '''simple docstring''' a__ : Union[str, Any] = 1 a__ : List[str] = 3 a__ : Union[str, Any] = (3_2, 3_2) a__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCAmelCase ) return image @property def __lowerCAmelCase ( self : Any ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) a__ : Any = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) return model @property def __lowerCAmelCase ( self : List[str] ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) a__ : Dict = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) a__ : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(_UpperCAmelCase ) @property def __lowerCAmelCase ( self : Dict ) -> int: '''simple docstring''' def extract(*A__ : Optional[Any] , **A__ : str ): class lowerCAmelCase__ : """simple docstring""" def __init__( self : List[str] ) -> int: '''simple docstring''' a__ : str = torch.ones([0] ) def __lowerCAmelCase ( self : Optional[Any] , A__ : Union[str, Any] ) -> str: '''simple docstring''' self.pixel_values.to(_UpperCAmelCase ) return self return Out() return extract def __lowerCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' a__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a__ : Union[str, Any] = self.dummy_cond_unet a__ : Optional[int] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) a__ : Union[str, Any] = self.dummy_vae a__ : Dict = self.dummy_text_encoder a__ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk a__ : Union[str, Any] = StableDiffusionPipeline( unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , ) a__ : Optional[Any] = sd_pipe.to(_UpperCAmelCase ) 
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) a__ : List[Any] = "A painting of a squirrel eating a burger" a__ : str = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) a__ : List[str] = sd_pipe([prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) a__ : Dict = output.images a__ : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) a__ : Optional[int] = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=_UpperCAmelCase , )[0] a__ : Optional[int] = image[0, -3:, -3:, -1] a__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a__ : int = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self : int ) -> List[Any]: '''simple docstring''' a__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator a__ : Any = self.dummy_cond_unet a__ : str = PNDMScheduler(skip_prk_steps=_UpperCAmelCase ) a__ : Optional[int] = self.dummy_vae a__ : Optional[Any] = self.dummy_text_encoder a__ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk a__ : Tuple = StableDiffusionPipeline( unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , ) a__ : Optional[Any] = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) a__ : Any = "A painting of a squirrel eating a burger" a__ : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) a__ : int = sd_pipe([prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) a__ : Any = output.images a__ : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) a__ : Any = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=_UpperCAmelCase , )[0] a__ : Any = image[0, -3:, -3:, -1] a__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a__ : Dict = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: '''simple docstring''' a__ : str = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=_UpperCAmelCase ) assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) assert isinstance(pipe.scheduler , _UpperCAmelCase ) assert pipe.safety_checker is None a__ : Optional[Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_UpperCAmelCase ) a__ : List[Any] = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None a__ : List[str] = pipe('''example prompt''' , 
num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def __lowerCAmelCase ( self : Dict ) -> Optional[int]: '''simple docstring''' a__ : Any = self.dummy_cond_unet a__ : Union[str, Any] = PNDMScheduler(skip_prk_steps=_UpperCAmelCase ) a__ : Any = self.dummy_vae a__ : int = self.dummy_text_encoder a__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 a__ : Union[str, Any] = unet.half() a__ : List[str] = vae.half() a__ : Dict = bert.half() # make sure here that pndm scheduler skips prk a__ : List[Any] = StableDiffusionPipeline( unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=self.dummy_extractor , ) a__ : List[Any] = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) a__ : List[str] = "A painting of a squirrel eating a burger" a__ : List[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Optional[int] ) -> str: '''simple docstring''' a__ : Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=_UpperCAmelCase ) a__ : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a__ : str = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) a__ : Any = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) a__ : Union[str, Any] = 4_0_0_3_6_6_0_3_4_6 a__ : Dict = 7 # without safety guidance (sld_guidance_scale = 0) a__ : Union[str, Any] = torch.manual_seed(_UpperCAmelCase ) a__ : Optional[int] = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a__ : Tuple = output.images a__ : Optional[int] = image[0, -3:, -3:, -1] a__ : List[str] = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) a__ : Optional[int] = torch.manual_seed(_UpperCAmelCase ) a__ : Optional[Any] = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : Any = output.images a__ : Tuple = image[0, -3:, -3:, -1] a__ : List[Any] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: 
'''simple docstring''' a__ : Any = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=_UpperCAmelCase ) a__ : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a__ : Tuple = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) a__ : int = "padme amidala taking a bath artwork, safe for work, no nudity" a__ : Tuple = 2_7_3_4_9_7_1_7_5_5 a__ : Tuple = 7 a__ : Dict = torch.manual_seed(_UpperCAmelCase ) a__ : int = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a__ : Tuple = output.images a__ : Optional[Any] = image[0, -3:, -3:, -1] a__ : Tuple = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 a__ : Any = torch.manual_seed(_UpperCAmelCase ) a__ : int = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : Dict = output.images a__ : int = image[0, -3:, -3:, -1] a__ : Optional[Any] = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCAmelCase ( self : str ) -> Tuple: '''simple docstring''' a__ : Optional[int] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) a__ : Any = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) a__ : Union[str, Any] = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." " leyendecker" ) a__ : Dict = 1_0_4_4_3_5_5_2_3_4 a__ : List[str] = 1_2 a__ : Any = torch.manual_seed(_UpperCAmelCase ) a__ : List[Any] = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , ) a__ : Optional[Any] = output.images a__ : Optional[Any] = image[0, -3:, -3:, -1] a__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 a__ : int = torch.manual_seed(_UpperCAmelCase ) a__ : Dict = sd_pipe( [prompt] , generator=_UpperCAmelCase , guidance_scale=_UpperCAmelCase , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : List[str] = output.images a__ : Any = image[0, -3:, -3:, -1] a__ : str = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
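The nightly tests above exercise Safe Latent Diffusion through the extra sld_* arguments. A condensed sketch of the same call pattern, assuming the checkpoint used by the tests is reachable and with an illustrative prompt:

import torch
from diffusers import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
generator = torch.manual_seed(0)
image = pipe(
    "portrait photograph, detailed",  # illustrative prompt
    generator=generator,
    guidance_scale=7,
    num_inference_steps=50,
    sld_guidance_scale=2_000,         # 0 disables safety guidance entirely
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]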
688
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : List[str] = '''▁''' _lowerCamelCase : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''} _lowerCamelCase : Dict = { '''vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''', }, '''monolingual_vocab_file''': { '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''', }, } _lowerCamelCase : Optional[Any] = {'''vinai/bartpho-syllable''': 1_0_2_4} class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = VOCAB_FILES_NAMES UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase_ = ["input_ids", "attention_mask"] def __init__( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any="<s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Dict="<unk>", _UpperCAmelCase : Tuple="<pad>", _UpperCAmelCase : int="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : Any, ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ : Any = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token SCREAMING_SNAKE_CASE__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file SCREAMING_SNAKE_CASE__ : Optional[int] = monolingual_vocab_file SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Optional[int] = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids: SCREAMING_SNAKE_CASE__ : Dict = cnt cnt += 1 with open(_UpperCAmelCase, "r", encoding="utf-8" ) as f: for line in f.readlines(): SCREAMING_SNAKE_CASE__ : int = line.strip().split()[0] SCREAMING_SNAKE_CASE__ : Tuple = len(self.fairseq_tokens_to_ids ) if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids: SCREAMING_SNAKE_CASE__ : List[Any] = len(self.fairseq_tokens_to_ids ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy() SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto() return state def __setstate__( self : int, _UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = d # for backward compatibility if not hasattr(self, 
"sp_model_kwargs" ): SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id] SCREAMING_SNAKE_CASE__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A_ ( self : List[str], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A_ ( self : Any ) -> List[str]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def A_ ( self : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A_ ( self : Tuple, _UpperCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase ) def A_ ( self : List[str], _UpperCAmelCase : Union[str, Any] ) -> Tuple: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def A_ ( self : List[str], _UpperCAmelCase : str ) -> str: """simple docstring""" return self.fairseq_ids_to_tokens[index] def A_ ( self : Optional[Any], _UpperCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip() return out_string def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE__ : List[str] = os.path.join( _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase, "wb" ) as fi: SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( 
_UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file, _UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_UpperCAmelCase, "w", encoding="utf-8" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F'''{str(_UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
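A usage sketch for the tokenizer above; it ships in transformers as BartphoTokenizer, and the checkpoint name comes from this file's pretrained-vocab map.

from transformers import BartphoTokenizer

tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.").input_ids  # illustrative Vietnamese sentence
print(tokenizer.decode(ids))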
663
0
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        # Normalise every edge key to (smaller vertex, larger vertex).
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # Grow a minimum spanning tree from an arbitrary start vertex.
        subgraph = Graph({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            min_edge: EdgeT = (0, 0)
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_directory = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
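A tiny direct check of the Graph class above on a three-vertex triangle; the minimum spanning tree keeps the two cheapest edges.

g = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 9})
mst = g.prims_algorithm()
assert sum(mst.edges.values()) == 8  # edges (0, 1) and (1, 2) survive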
486
from random import shuffle import tensorflow as tf from numpy import array def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = int(SCREAMING_SNAKE_CASE__ ) assert noofclusters < len(SCREAMING_SNAKE_CASE__ ) # Find out the dimensionality SCREAMING_SNAKE_CASE__ : List[Any] = len(vectors[0] ) # Will help select random centroids from among the available vectors SCREAMING_SNAKE_CASE__ : List[Any] = list(range(len(SCREAMING_SNAKE_CASE__ ) ) ) shuffle(SCREAMING_SNAKE_CASE__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. SCREAMING_SNAKE_CASE__ : Tuple = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION SCREAMING_SNAKE_CASE__ : List[Any] = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points SCREAMING_SNAKE_CASE__ : Any = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(SCREAMING_SNAKE_CASE__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float64" , [dim] ) SCREAMING_SNAKE_CASE__ : Dict = [] for centroid in centroids: cent_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) SCREAMING_SNAKE_CASE__ : Tuple = [tf.Variable(0 ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value SCREAMING_SNAKE_CASE__ : Tuple = tf.placeholder("int32" ) SCREAMING_SNAKE_CASE__ : Tuple = [] for assignment in assignments: cluster_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input SCREAMING_SNAKE_CASE__ : int = tf.placeholder("float" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors SCREAMING_SNAKE_CASE__ : str = tf.reduce_mean(SCREAMING_SNAKE_CASE__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.placeholder("float" , [dim] ) SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float" , [dim] ) SCREAMING_SNAKE_CASE__ : Dict = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.placeholder("float" , [noofclusters] ) SCREAMING_SNAKE_CASE__ : Tuple = tf.argmin(SCREAMING_SNAKE_CASE__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. SCREAMING_SNAKE_CASE__ : Tuple = tf.initialize_all_variables() # Initialize all variables sess.run(SCREAMING_SNAKE_CASE__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. 
To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. SCREAMING_SNAKE_CASE__ : Tuple = 1_00 for _ in range(SCREAMING_SNAKE_CASE__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : Any = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. SCREAMING_SNAKE_CASE__ : Tuple = [ sess.run(SCREAMING_SNAKE_CASE__ , feed_dict={va: vect, va: sess.run(SCREAMING_SNAKE_CASE__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input SCREAMING_SNAKE_CASE__ : Any = sess.run( SCREAMING_SNAKE_CASE__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(SCREAMING_SNAKE_CASE__ ): # Collect all the vectors assigned to this cluster SCREAMING_SNAKE_CASE__ : Dict = [ vectors[i] for i in range(len(SCREAMING_SNAKE_CASE__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location SCREAMING_SNAKE_CASE__ : str = sess.run( SCREAMING_SNAKE_CASE__ , feed_dict={mean_input: array(SCREAMING_SNAKE_CASE__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments SCREAMING_SNAKE_CASE__ : int = sess.run(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = sess.run(SCREAMING_SNAKE_CASE__ ) return centroids, assignments
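A sketch of how the clustering routine above is meant to be driven; it assumes TensorFlow 1.x (the graph relies on tf.Session and tf.sub) and assumes the function's two collapsed parameters are restored as distinct names, since the renaming pass left them duplicated.

from numpy import array

samples = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 9.5]])
centroids, assignments = _a(samples, 2)  # 2 clusters; returns final centroids and a label per vector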
663
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
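As with the XLM-RoBERTa-XL module above, the lazy structure resolves to ordinary imports; a minimal sketch:

from transformers import SwinConfig, SwinModel

model = SwinModel(SwinConfig())  # randomly initialised Swin backbone with default hyperparameters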
462
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
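# --- Illustrative invocation sketch (added for clarity; not part of the original
# script, and the paths below are placeholders). The flags match the argparse
# definitions above; --input_dir must contain tokenizer.model plus a per-size
# model folder such as 7B/:
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama \
#       --model_size 7B \
#       --output_dir /path/to/hf_model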
663
0
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
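# --- Illustrative usage sketch (added; not part of the original sample) ---
# The checks assume the `binary_search` definition above and a list that is
# already sorted in ascending order. Note that the slicing-based recursion
# copies sublists, so each level costs O(n); an index-based variant would keep
# the classic O(log n) behaviour.
def _demo_binary_search() -> None:
    assert binary_search([1, 3, 5, 7, 9], 7) is True
    assert binary_search([1, 3, 5, 7, 9], 4) is False
    assert binary_search([], 4) is False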
590
import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self):
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
663
0
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
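# --- Illustrative sanity check (added; not part of the original sample) ---
# _modexpt computes modular exponentiation by squaring, so for exponent >= 1 it
# should agree with Python's built-in three-argument pow(base, exponent, modulus).
def _demo_modexpt() -> None:
    assert _modexpt(3, 7, 100) == pow(3, 7, 100)  # 2187 % 100 == 87
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)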
344
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
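# --- Illustrative sketch (added; not part of the original sample) ---
# 644 = 2^2 * 7 * 23 has three distinct prime factors, and 14 = 2*7, 15 = 3*5
# form the first pair of consecutive integers with two distinct prime factors
# each, so run(2) should return [14, 15].
def _demo_distinct_prime_factors() -> None:
    assert unique_prime_factors(644) == {2, 7, 23}
    assert run(2) == [14, 15]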
663
0
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging a : List[Any] = ( '''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py''' ) a : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name def _SCREAMING_SNAKE_CASE ( ) ->int: '''simple docstring''' a : List[str] = "https://pypi.org/pypi/diffusers/json" a : Optional[Any] = json.loads(request.urlopen(SCREAMING_SNAKE_CASE__ ).read() )["releases"].keys() return sorted(SCREAMING_SNAKE_CASE__ , key=lambda _lowercase : version.Version(SCREAMING_SNAKE_CASE__ ) ) def _SCREAMING_SNAKE_CASE ( ) ->str: '''simple docstring''' if HF_MODULES_CACHE in sys.path: return sys.path.append(SCREAMING_SNAKE_CASE__ ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) a : Union[str, Any] = Path(SCREAMING_SNAKE_CASE__ ) / "__init__.py" if not init_path.exists(): init_path.touch() def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, os.PathLike] ) ->List[Any]: '''simple docstring''' init_hf_modules() a : str = Path(SCREAMING_SNAKE_CASE__ ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) a : Tuple = dynamic_module_path / "__init__.py" if not init_path.exists(): init_path.touch() def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->Tuple: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ , "r" , encoding="utf-8" ) as f: a : List[Any] = f.read() # Imports of the form `import .xxx` a : Optional[Any] = re.findall("^\s*import\s+\.(\S+)\s*$" , SCREAMING_SNAKE_CASE__ , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , SCREAMING_SNAKE_CASE__ , flags=re.MULTILINE ) # Unique-ify return list(set(SCREAMING_SNAKE_CASE__ ) ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] ) ->int: '''simple docstring''' a : List[Any] = False a : str = [module_file] a : Optional[Any] = [] # Let's recurse through all relative imports while not no_change: a : List[Any] = [] for f in files_to_check: new_imports.extend(get_relative_imports(SCREAMING_SNAKE_CASE__ ) ) a : List[str] = Path(SCREAMING_SNAKE_CASE__ ).parent a : Optional[Any] = [str(module_path / m ) for m in new_imports] a : Tuple = [f for f in new_import_files if f not in all_relative_imports] a : Any = [F"""{f}.py""" for f in new_import_files] a : str = len(SCREAMING_SNAKE_CASE__ ) == 0 all_relative_imports.extend(SCREAMING_SNAKE_CASE__ ) return all_relative_imports def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Tuple: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ , "r" , encoding="utf-8" ) as f: a : List[Any] = f.read() # Imports of the form `import xxx` a : Any = re.findall("^\s*import\s+(\S+)\s*$" , SCREAMING_SNAKE_CASE__ , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("^\s*from\s+(\S+)\s+import" , SCREAMING_SNAKE_CASE__ , flags=re.MULTILINE ) # Only keep the top-level module a : List[str] = [imp.split("." 
)[0] for imp in imports if not imp.startswith("." )] # Unique-ify and test we got them all a : Dict = list(set(SCREAMING_SNAKE_CASE__ ) ) a : str = [] for imp in imports: try: importlib.import_module(SCREAMING_SNAKE_CASE__ ) except ImportError: missing_packages.append(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: raise ImportError( "This modeling file requires the following packages that were not found in your environment: " F"""{', '.join(SCREAMING_SNAKE_CASE__ )}. Run `pip install {' '.join(SCREAMING_SNAKE_CASE__ )}`""" ) return get_relative_imports(SCREAMING_SNAKE_CASE__ ) def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : Tuple ) ->Tuple: '''simple docstring''' a : List[Any] = module_path.replace(os.path.sep , "." ) a : Union[str, Any] = importlib.import_module(SCREAMING_SNAKE_CASE__ ) if class_name is None: return find_pipeline_class(SCREAMING_SNAKE_CASE__ ) return getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] ) ->Dict: '''simple docstring''' from ..pipelines import DiffusionPipeline a : Optional[Any] = dict(inspect.getmembers(SCREAMING_SNAKE_CASE__ , inspect.isclass ) ) a : int = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , SCREAMING_SNAKE_CASE__ ) and cls.__module__.split("." )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" F""" {loaded_module}.""" ) a : Optional[Any] = cls return pipeline_class def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, os.PathLike] , _lowercase : str , _lowercase : Optional[Union[str, os.PathLike]] = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : Optional[Dict[str, str]] = None , _lowercase : Optional[Union[bool, str]] = None , _lowercase : Optional[str] = None , _lowercase : bool = False , ) ->Union[str, Any]: '''simple docstring''' a : int = str(SCREAMING_SNAKE_CASE__ ) a : str = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if os.path.isfile(SCREAMING_SNAKE_CASE__ ): a : Tuple = module_file_or_url a : Dict = "local" elif pretrained_model_name_or_path.count("/" ) == 0: a : Optional[Any] = get_diffusers_versions() # cut ".dev0" a : Dict = "v" + ".".join(__version__.split("." )[:3] ) # retrieve github version that matches if revision is None: a : Dict = latest_version if latest_version[1:] in available_versions else "main" logger.info(F"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: a : Tuple = F"""v{revision}""" elif revision == "main": a : Optional[Any] = revision else: raise ValueError( F"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" F""" {', '.join(available_versions + ['main'] )}.""" ) # community pipeline on GitHub a : Tuple = COMMUNITY_PIPELINES_URL.format(revision=SCREAMING_SNAKE_CASE__ , pipeline=SCREAMING_SNAKE_CASE__ ) try: a : Tuple = cached_download( SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , ) a : Dict = "git" a : Tuple = pretrained_model_name_or_path + ".py" except EnvironmentError: logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached a : List[str] = hf_hub_download( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , ) a : Optional[int] = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) ) except EnvironmentError: logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment a : List[str] = check_imports(SCREAMING_SNAKE_CASE__ ) # Now we move the module inside our cached dynamic modules. a : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(SCREAMING_SNAKE_CASE__ ) a : str = Path(SCREAMING_SNAKE_CASE__ ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(SCREAMING_SNAKE_CASE__ , submodule_path / module_file ) for module_needed in modules_needed: a : int = F"""{module_needed}.py""" shutil.copy(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): a : Union[str, Any] = use_auth_token elif use_auth_token is True: a : Optional[int] = HfFolder.get_token() else: a : int = None a : Any = model_info(SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
a : str = submodule_path / commit_hash a : List[Any] = full_submodule + os.path.sep + commit_hash create_dynamic_module(SCREAMING_SNAKE_CASE__ ) if not (submodule_path / module_file).exists(): shutil.copy(SCREAMING_SNAKE_CASE__ , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( SCREAMING_SNAKE_CASE__ , F"""{module_needed}.py""" , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , ) return os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, os.PathLike] , _lowercase : str , _lowercase : Optional[str] = None , _lowercase : Optional[Union[str, os.PathLike]] = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : Optional[Dict[str, str]] = None , _lowercase : Optional[Union[bool, str]] = None , _lowercase : Optional[str] = None , _lowercase : bool = False , **_lowercase : List[Any] , ) ->Optional[int]: '''simple docstring''' a : Union[str, Any] = get_cached_module_file( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , ) return get_class_in_module(SCREAMING_SNAKE_CASE__ , final_module.replace(".py" , "" ) )
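# --- Illustrative usage sketch (added; not part of the original file) ---
# A hypothetical call loading a community pipeline class through the helpers
# above; the repo id and file name are placeholders, not values from the source.
def _demo_dynamic_module_load():
    pipeline_cls = get_class_from_dynamic_module(
        "some-user/some-custom-pipeline",  # hypothetical repo id
        "pipeline.py",                     # module file inside that repo
        class_name=None,                   # None -> auto-detect the DiffusionPipeline subclass
    )
    return pipeline_cls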
633
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
663
0
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]
    expected_text = [
        "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
        " reduce the risk of wildfires.",
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
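# --- Illustrative note (added; not part of the original file) ---
# In a transformers checkout, a test module like this is normally run with
# pytest; the exact file path below is an assumption about where it lives:
#
#   python -m pytest tests/models/pegasus/test_modeling_tf_pegasus.py -k test_config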
665
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score a candidate by counting characters in the right position.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    # Replace a random gene of the child with one from the gene list.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [(item, score / len(target)) for item, score in population_score]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
663
0
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __UpperCAmelCase = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __UpperCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS) __UpperCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING __UpperCAmelCase = { # used to compute the property `self.chunk_length` '''EncodecConfig''': ['''overlap'''], # used as `self.bert_model = BertModel(config, ...)` '''DPRConfig''': True, # not used in modeling files, but it's an important information '''FSMTConfig''': ['''langs'''], # used internally in the configuration class file '''GPTNeoConfig''': ['''attention_types'''], # used internally in the configuration class file '''EsmConfig''': ['''is_folding_model'''], # used during training (despite we don't have training script for these models yet) '''Mask2FormerConfig''': ['''ignore_value'''], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) '''OneFormerConfig''': ['''ignore_value''', '''norm'''], # used during preprocessing and collation, see `collating_graphormer.py` '''GraphormerConfig''': ['''spatial_pos_max'''], # used internally in the configuration class file '''T5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally '''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], '''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], # used internally in the configuration class file '''LongT5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file '''SwitchTransformersConfig''': ['''feed_forward_proj'''], # having default values other than `1e-5` - we can't fix them without breaking '''BioGptConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''GLPNConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''SegformerConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''CvtConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''PerceiverConfig''': ['''layer_norm_eps'''], # used internally to calculate the feature size '''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate `mlp_dim` '''SamVisionConfig''': ['''mlp_ratio'''], # For (head) training, but so far not implemented '''ClapAudioConfig''': ['''num_classes'''], # Not used, but providing useful information to users '''SpeechT5HifiGanConfig''': ['''sampling_rate'''], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { 'CLIPSegConfig': True, 
'DeformableDetrConfig': True, 'DetaConfig': True, 'DinatConfig': True, 'DonutSwinConfig': True, 'EfficientFormerConfig': True, 'FSMTConfig': True, 'JukeboxConfig': True, 'LayoutLMv2Config': True, 'MaskFormerSwinConfig': True, 'MT5Config': True, 'NatConfig': True, 'OneFormerConfig': True, 'PerceiverConfig': True, 'RagConfig': True, 'SpeechT5Config': True, 'SwinConfig': True, 'Swin2SRConfig': True, 'Swinv2Config': True, 'SwitchTransformersConfig': True, 'TableTransformerConfig': True, 'TapasConfig': True, 'TransfoXLConfig': True, 'UniSpeechConfig': True, 'UniSpeechSatConfig': True, 'WavLMConfig': True, 'WhisperConfig': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) 'JukeboxPriorConfig': True, # TODO: @Younes (for `is_decoder`) 'Pix2StructTextConfig': True, } ) def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : str ) -> Optional[int]: '''simple docstring''' lowerCAmelCase_ : str = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f'config.{attribute}' in modeling_source or f'getattr(config, "{attribute}"' in modeling_source or f'getattr(self.config, "{attribute}"' in modeling_source ): lowerCAmelCase_ : Tuple = True # Deal with multi-line cases elif ( re.search( Rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , SCREAMING_SNAKE_CASE__ , ) is not None ): lowerCAmelCase_ : Any = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: lowerCAmelCase_ : List[str] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files lowerCAmelCase_ : str = [ "bos_index", "eos_index", "pad_index", "unk_index", "mask_index", "image_size", "use_cache", "out_features", "out_indices", ] lowerCAmelCase_ : Any = ["encoder_no_repeat_ngram_size"] # Special cases to be allowed lowerCAmelCase_ : Optional[int] = True if not attribute_used: lowerCAmelCase_ : List[Any] = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: lowerCAmelCase_ : Dict = True elif attribute in ["tie_word_embeddings"] and default_value is False: lowerCAmelCase_ : List[Any] = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: lowerCAmelCase_ : List[str] = True elif attribute.endswith("""_token_id""" ): lowerCAmelCase_ : List[Any] = True # configuration class specific cases if not case_allowed: lowerCAmelCase_ : Optional[int] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) lowerCAmelCase_ : Dict = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> int: '''simple docstring''' lowerCAmelCase_ : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters ) lowerCAmelCase_ : Any = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]] lowerCAmelCase_ : Tuple = [signature[param].default for param in parameter_names] # If 
`attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass lowerCAmelCase_ : Union[str, Any] = {} if len(config_class.attribute_map ) > 0: lowerCAmelCase_ : Tuple = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files lowerCAmelCase_ : Optional[int] = inspect.getsourcefile(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase_ : Union[str, Any] = os.path.dirname(SCREAMING_SNAKE_CASE__ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. lowerCAmelCase_ : str = [os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for fn in os.listdir(SCREAMING_SNAKE_CASE__ ) if fn.startswith("""modeling_""" )] # Get the source code strings lowerCAmelCase_ : Optional[Any] = [] for path in modeling_paths: if os.path.isfile(SCREAMING_SNAKE_CASE__ ): with open(SCREAMING_SNAKE_CASE__ ) as fp: modeling_sources.append(fp.read() ) lowerCAmelCase_ : Any = [] for config_param, default_value in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # `attributes` here is all the variant names for `config_param` lowerCAmelCase_ : Optional[int] = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): unused_attributes.append(attributes[0] ) return sorted(SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ : Any = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) lowerCAmelCase_ : Tuple = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda lowercase__ : inspect.isclass(SCREAMING_SNAKE_CASE__ ) and issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and inspect.getmodule(SCREAMING_SNAKE_CASE__ ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: lowerCAmelCase_ : List[Any] = check_config_attributes_being_used(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: lowerCAmelCase_ : int = unused_attributes if len(SCREAMING_SNAKE_CASE__ ) > 0: lowerCAmelCase_ : Optional[int] = "The following configuration classes contain unused attributes in the corresponding modeling files:\n" for name, attributes in configs_with_unused_attributes.items(): error += f'{name}: {attributes}\n' raise ValueError(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": check_config_attributes()
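The checker above leans on inspect.signature to pair every configuration __init__ parameter with its default before scanning the modeling sources. A minimal, self-contained illustration of that introspection step (BertConfig is only an example class, not part of the original script):

import inspect

from transformers import BertConfig  # any PretrainedConfig subclass works here

signature = inspect.signature(BertConfig.__init__)
parameter_names = [name for name in signature.parameters if name not in ("self", "kwargs")]
defaults = {name: signature.parameters[name].default for name in parameter_names}
print(defaults["hidden_size"])  # 768 for BertConfig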
600
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE dy/dx = ode_func(x, y) from x0 to x_end with the explicit Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Euler step: follow the tangent line for one step of length step_size
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
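A minimal usage sketch for the solver above (the ODE, step size, and endpoint check are illustrative assumptions, not part of the original file):

# Solve dy/dx = y with y(0) = 1, whose exact solution is e^x, and compare
# the Euler estimate at x = 1 with np.exp(1).
import numpy as np

def f(x: float, y: float) -> float:
    return y  # dy/dx = y

ys = explicit_euler(f, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(ys[-1], np.exp(1))  # the estimate approaches e as step_size shrinks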
663
0
class Graph:
    """Undirected weighted graph with a Borůvka minimum-spanning-tree routine."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate weights so every edge weight is distinct (a Borůvka precondition)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))  # drop the reversed duplicate of each undirected edge
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree of ``graph`` (assumes distinct edge weights)."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))  # keep one direction per undirected edge
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
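A minimal usage sketch for the Borůvka routine above; the vertex labels and (already distinct) weights are illustrative assumptions:

# Build a small weighted graph and extract its minimum spanning tree.
g = Graph.build(
    vertices=[0, 1, 2, 3],
    edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)],
)
mst = Graph.boruvka_mst(g)
print(mst)  # __str__ lists each MST edge in both directions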
121
def solution(limit: int = 28123) -> int:
    """Project Euler 23: sum of all positive integers that cannot be written
    as the sum of two abundant numbers."""
    # sum_divs[n] ends up holding the sum of the proper divisors of n
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
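A quick sanity check for the divisor sieve above (illustrative, not part of the original file): the abundant numbers below 50 are exactly 12, 18, 20, 24, 30, 36, 40, 42, 48.

def proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)

print([n for n in range(1, 50) if proper_divisor_sum(n) > n])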
663
0
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def snake_case_ ( _lowerCAmelCase : str ) -> List[str]: UpperCAmelCase : Tuple = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def snake_case_ ( _lowerCAmelCase : int ) -> List[str]: UpperCAmelCase : str = emb.weight.shape UpperCAmelCase : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Dict = emb.weight.data return lin_layer def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict=None ) -> Optional[int]: UpperCAmelCase : List[Any] = {} for old_key in state_dict.keys(): UpperCAmelCase : Union[str, Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : Dict = key.replace('''moe_layer.experts.0''' , f"""ffn.experts.expert_{expert_idx}""" ) else: UpperCAmelCase : Optional[int] = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: UpperCAmelCase : str = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: UpperCAmelCase : Dict = key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: UpperCAmelCase : int = key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: UpperCAmelCase : Optional[Any] = key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : Dict = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: UpperCAmelCase : Dict = key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) UpperCAmelCase : Dict = state_dict[old_key] return new_dict def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : str = WEIGHTS_NAME ) -> Dict: UpperCAmelCase : Union[str, Any] = [] UpperCAmelCase : Optional[int] = 0 os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) for expert in range(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase : Union[str, Any] = switch_checkpoint_path + f"""-rank-{expert}.pt""" if os.path.isfile(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ )["model"] remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : List[Any] = rename_fairseq_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE__ , weights_name.replace('''.bin''' , f"""-{len(SCREAMING_SNAKE_CASE__ )+1:05d}-of-???.bin""" ) ) torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(SCREAMING_SNAKE_CASE__ )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , weights_name.replace('''.bin''' , f"""-{len(SCREAMING_SNAKE_CASE__ )+1:05d}-of-???.bin""" ) ) UpperCAmelCase : Any = torch.load(switch_checkpoint_path + '''-shared.pt''' 
)["model"] remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : List[Any] = rename_fairseq_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) UpperCAmelCase : List[Any] = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(SCREAMING_SNAKE_CASE__ ) == 1: UpperCAmelCase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Otherwise, let's build the index UpperCAmelCase : List[str] = {} for idx, shard in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase : List[str] = weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE__ ):05d}.bin""" ) UpperCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , weights_name.replace('''.bin''' , f"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) for key in shard: UpperCAmelCase : List[str] = shard_file # Add the metadata UpperCAmelCase : List[str] = {"total_size": total_size} UpperCAmelCase : Optional[int] = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''w''' , encoding='''utf-8''' ) as f: UpperCAmelCase : List[str] = json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ ) + "\n" f.write(SCREAMING_SNAKE_CASE__ ) return metadata, index if __name__ == "__main__": UpperCamelCase__: int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--nllb_moe_checkpoint_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b", type=str, required=False, help="Path to the output pytorch model.", ) UpperCamelCase__: str = parser.parse_args() UpperCamelCase__: List[Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) UpperCamelCase__: str = NllbMoeConfig.from_pretrained( "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) UpperCamelCase__: int = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("Done") model.save_pretrained(args.pytorch_dump_folder_path)
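For reference, a sketch of the sharded-checkpoint index the code above writes out: a "weight_map" from parameter name to shard file, plus total-size metadata. The entries below are hypothetical placeholders:

import json

weight_map = {
    "encoder.layer.0.weight": "pytorch_model-00001-of-00002.bin",  # hypothetical entries
    "decoder.layer.0.weight": "pytorch_model-00002-of-00002.bin",
}
index = {"metadata": {"total_size": 123456789}, "weight_map": weight_map}
print(json.dumps(index, indent=2, sort_keys=True))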
127
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
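A minimal sketch of the lazy-import idea behind _LazyModule, using only the standard library (an illustration of the pattern, not transformers' actual implementation): attribute access triggers the real import on first use and caches the result.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map exported symbol -> submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(symbol)
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value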
663
0
'''simple docstring''' import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class A ( __lowerCamelCase ): __UpperCAmelCase : Optional[Any] = (DPMSolverSDEScheduler,) __UpperCAmelCase : int = 10 def __lowerCAmelCase ( self , **snake_case_ ) -> Tuple: _a = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "noise_sampler_seed": 0, } config.update(**_UpperCAmelCase ) return config def __lowerCAmelCase ( self ) -> int: for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def __lowerCAmelCase ( self ) -> int: for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def __lowerCAmelCase ( self ) -> List[Any]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def __lowerCAmelCase ( self ) -> int: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def __lowerCAmelCase ( self ) -> Any: _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) _a = self.dummy_model() _a = self.dummy_sample_deter * scheduler.init_noise_sigma _a = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _a = model(_UpperCAmelCase , _UpperCAmelCase ) _a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _a = output.prev_sample _a = torch.sum(torch.abs(_UpperCAmelCase ) ) _a = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2 assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2 assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3 def __lowerCAmelCase ( self ) -> Tuple: _a = self.scheduler_classes[0] _a = self.get_scheduler_config(prediction_type="v_prediction" ) _a = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) _a = self.dummy_model() _a = self.dummy_sample_deter * scheduler.init_noise_sigma _a = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): _a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _a = model(_UpperCAmelCase , _UpperCAmelCase ) _a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _a = output.prev_sample _a = torch.sum(torch.abs(_UpperCAmelCase ) ) _a = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2 assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2 assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2 assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3 def __lowerCAmelCase ( self ) -> Dict: _a = 
self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase ) _a = self.dummy_model() _a = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: _a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _a = model(_UpperCAmelCase , _UpperCAmelCase ) _a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _a = output.prev_sample _a = torch.sum(torch.abs(_UpperCAmelCase ) ) _a = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2 assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2 assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3 def __lowerCAmelCase ( self ) -> Optional[Any]: _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**_UpperCAmelCase , use_karras_sigmas=_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase ) _a = self.dummy_model() _a = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma _a = sample.to(_UpperCAmelCase ) for t in scheduler.timesteps: _a = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) _a = model(_UpperCAmelCase , _UpperCAmelCase ) _a = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _a = output.prev_sample _a = torch.sum(torch.abs(_UpperCAmelCase ) ) _a = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
131
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = BlenderbotSmallConfig UpperCAmelCase_ = {} UpperCAmelCase_ = "gelu" def __init__( self : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[int]=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : str=9_9, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Any=2, _UpperCAmelCase : Any=4, _UpperCAmelCase : List[Any]=3_7, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=2_0, _UpperCAmelCase : int=2, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : List[str]=0, ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = is_training SCREAMING_SNAKE_CASE__ : List[Any] = use_labels SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Any = num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE__ : Any = eos_token_id SCREAMING_SNAKE_CASE__ : Optional[int] = pad_token_id SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id def A_ ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) SCREAMING_SNAKE_CASE__ : Dict = tf.concat([input_ids, eos_tensor], axis=1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Any = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) SCREAMING_SNAKE_CASE__ : str = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) return config, inputs_dict def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : int ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder() SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["input_ids"] SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids[:1, :] SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict["attention_mask"][:1, :] SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict["head_mask"] SCREAMING_SNAKE_CASE__ : Tuple = 1 # first forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Dict = ids_tensor((self.batch_size, 3), config.vocab_size ) SCREAMING_SNAKE_CASE__ : int = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Any = tf.concat([input_ids, next_tokens], axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 ) SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE__ : Tuple = int(ids_tensor((1,), output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 ) def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE__ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) UpperCAmelCase_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase_ = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, 
"feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TFBlenderbotSmallModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase ) def A_ ( self : Any ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def A_ ( self : Any ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_tokenizers @require_tf class lowerCamelCase (unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] UpperCAmelCase_ = "facebook/blenderbot_small-90M" @cached_property def A_ ( self : Dict ) -> Optional[Any]: """simple docstring""" # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def A_ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A_ ( self : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, return_tensors="tf" ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
663
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" __UpperCamelCase = DiTPipeline __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS __UpperCamelCase = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } __UpperCamelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS __UpperCamelCase = False def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) a__ : Optional[Any] = TransformeraDModel( sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_UpperCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_0_0_0 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_UpperCAmelCase , ) a__ : Dict = AutoencoderKL() a__ : Dict = DDIMScheduler() a__ : List[Any] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def __lowerCAmelCase ( self : Union[str, Any] , A__ : Optional[Any] , A__ : Optional[int]=0 ) -> Dict: '''simple docstring''' if str(_UpperCAmelCase ).startswith('''mps''' ): a__ : Tuple = torch.manual_seed(_UpperCAmelCase ) else: a__ : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) a__ : Optional[int] = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __lowerCAmelCase ( self : List[Any] ) -> List[Any]: '''simple docstring''' a__ : Union[str, Any] = "cpu" a__ : Tuple = self.get_dummy_components() a__ : Optional[Any] = self.pipeline_class(**_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) a__ : Any = self.get_dummy_inputs(_UpperCAmelCase ) a__ : str = pipe(**_UpperCAmelCase ).images a__ : str = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 1_6, 1_6, 3) ) a__ : Dict = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) a__ : str = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_UpperCAmelCase , 1E-3 ) def __lowerCAmelCase ( self : Union[str, Any] ) -> int: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=_UpperCAmelCase , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self : int ) -> Tuple: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Dict ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : int ) -> Tuple: '''simple docstring''' a__ : int = 
torch.manual_seed(0 ) a__ : List[Any] = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' ) pipe.to('''cuda''' ) a__ : Union[str, Any] = ["vase", "umbrella", "white shark", "white wolf"] a__ : List[Any] = pipe.get_label_ids(_UpperCAmelCase ) a__ : List[Any] = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=4_0 , output_type='''np''' ).images for word, image in zip(_UpperCAmelCase , _UpperCAmelCase ): a__ : str = load_numpy( F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCAmelCase ( self : Dict ) -> List[str]: '''simple docstring''' a__ : str = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' ) a__ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('''cuda''' ) a__ : int = ["vase", "umbrella"] a__ : Union[str, Any] = pipe.get_label_ids(_UpperCAmelCase ) a__ : str = torch.manual_seed(0 ) a__ : int = pipe(_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2_5 , output_type='''np''' ).images for word, image in zip(_UpperCAmelCase , _UpperCAmelCase ): a__ : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' F'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1E-1
688
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = (DPMSolverSDEScheduler,) UpperCAmelCase_ = 10 def A_ ( self : List[str], **_UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "noise_sampler_seed": 0, } config.update(**_UpperCAmelCase ) return config def A_ ( self : Tuple ) -> int: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def A_ ( self : int ) -> int: """simple docstring""" for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase, beta_end=_UpperCAmelCase ) def A_ ( self : List[Any] ) -> List[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def A_ ( self : Optional[int] ) -> int: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def A_ ( self : Optional[int] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : int = self.dummy_model() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : Dict = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def A_ ( self : List[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config(prediction_type="v_prediction" ) SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) 
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3 def A_ ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : List[str] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model() SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3 def A_ ( self : str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**_UpperCAmelCase, use_karras_sigmas=_UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps, device=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = self.dummy_model() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE__ : Optional[Any] = sample.to(_UpperCAmelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE__ : str = scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.mean(torch.abs(_UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 elif torch_device in ["cuda"]: assert 
abs(result_sum.item() - 177.63653564453125 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
663
0
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
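A hypothetical invocation of the converter above; the script filename and all paths are placeholders, not values from the original file:

# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --scheduler_type pndm \
#     --extract_ema \
#     --dump_path ./converted-pipeline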
486
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = ["image_processor", "tokenizer"] UpperCAmelCase_ = "AutoImageProcessor" UpperCAmelCase_ = "AutoTokenizer" def __init__( self : Tuple, _UpperCAmelCase : str=None, _UpperCAmelCase : str=None, **_UpperCAmelCase : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead.", _UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : str = kwargs.pop("feature_extractor" ) SCREAMING_SNAKE_CASE__ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : int = self.image_processor SCREAMING_SNAKE_CASE__ : Any = False def __call__( self : List[str], *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("images", _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("text", _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : Optional[int] = args[0] SCREAMING_SNAKE_CASE__ : str = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: SCREAMING_SNAKE_CASE__ : Dict = self.image_processor(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase ) if text is not None: SCREAMING_SNAKE_CASE__ : str = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: SCREAMING_SNAKE_CASE__ : Optional[int] = encodings["input_ids"] return inputs def A_ ( self : Dict, *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase ) def A_ ( self : List[str], *_UpperCAmelCase : int, **_UpperCAmelCase : Dict ) -> Any: """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase ) @contextmanager def A_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." 
) SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer yield SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor SCREAMING_SNAKE_CASE__ : Union[str, Any] = False def A_ ( self : Tuple, _UpperCAmelCase : List[Any], _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[Any]=None ) -> Any: """simple docstring""" if added_vocab is None: SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.get_added_vocab() SCREAMING_SNAKE_CASE__ : str = {} while tokens: SCREAMING_SNAKE_CASE__ : Dict = re.search(r"<s_(.*?)>", _UpperCAmelCase, re.IGNORECASE ) if start_token is None: break SCREAMING_SNAKE_CASE__ : Any = start_token.group(1 ) SCREAMING_SNAKE_CASE__ : Dict = re.search(rF'''</s_{key}>''', _UpperCAmelCase, re.IGNORECASE ) SCREAMING_SNAKE_CASE__ : Any = start_token.group() if end_token is None: SCREAMING_SNAKE_CASE__ : List[str] = tokens.replace(_UpperCAmelCase, "" ) else: SCREAMING_SNAKE_CASE__ : Optional[int] = end_token.group() SCREAMING_SNAKE_CASE__ : List[str] = re.escape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = re.escape(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''', _UpperCAmelCase, re.IGNORECASE ) if content is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node SCREAMING_SNAKE_CASE__ : str = self.tokenajson(_UpperCAmelCase, is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase ) if value: if len(_UpperCAmelCase ) == 1: SCREAMING_SNAKE_CASE__ : str = value[0] SCREAMING_SNAKE_CASE__ : List[str] = value else: # leaf nodes SCREAMING_SNAKE_CASE__ : Optional[int] = [] for leaf in content.split(r"<sep/>" ): SCREAMING_SNAKE_CASE__ : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": SCREAMING_SNAKE_CASE__ : str = leaf[1:-2] # for categorical special tokens output[key].append(_UpperCAmelCase ) if len(output[key] ) == 1: SCREAMING_SNAKE_CASE__ : str = output[key][0] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokens[tokens.find(_UpperCAmelCase ) + len(_UpperCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=_UpperCAmelCase, added_vocab=_UpperCAmelCase ) if len(_UpperCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def A_ ( self : str ) -> Optional[Any]: """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", _UpperCAmelCase, ) return self.image_processor_class @property def A_ ( self : int ) -> List[str]: """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", _UpperCAmelCase, ) return self.image_processor
663
0
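The record above ends with a Donut-style token2json parser (rendered as `tokenajson` by the corpus) that turns sequences like <s_name>latte</s_name> into nested dicts. A minimal, de-obfuscated sketch of the same recursion — a hypothetical standalone helper, not the library implementation, which additionally handles an added-vocab lookup and <sep/>-separated leaf lists:

import re

def token2json_sketch(tokens: str) -> dict:
    # Walk the string, pairing each <s_key> opener with its </s_key> closer.
    output = {}
    while True:
        start = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
        if start is None:
            break
        key = start.group(1)
        end = re.search(rf"</s_{re.escape(key)}>", tokens, re.IGNORECASE)
        if end is None:
            # Unbalanced opener: drop it and keep scanning.
            tokens = tokens.replace(start.group(), "", 1)
            continue
        inner = re.search(
            rf"{re.escape(start.group())}(.*?){re.escape(end.group())}",
            tokens,
            re.IGNORECASE | re.DOTALL,
        )
        value = inner.group(1).strip() if inner else ""
        # Non-leaf nodes contain further <s_...> tags and recurse.
        output[key] = token2json_sketch(value) if "<s_" in value else value
        tokens = tokens[end.end():]
    return output

# token2json_sketch("<s_menu><s_name>latte</s_name></s_menu>")
# -> {"menu": {"name": "latte"}}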
import glob import os import random from string import ascii_lowercase, digits import cva lowerCAmelCase = '''''' lowerCAmelCase = '''''' lowerCAmelCase = '''''' lowerCAmelCase = 1 # (0 is vertical, 1 is horizontal) def __SCREAMING_SNAKE_CASE ( ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[int] = get_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print('''Processing...''' ) __UpperCAmelCase : str = update_image_and_anno(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for index, image in enumerate(SCREAMING_SNAKE_CASE__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __UpperCAmelCase : Tuple = random_chars(32 ) __UpperCAmelCase : Union[str, Any] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __UpperCAmelCase : Optional[int] = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cva.imwrite(f"/{file_root}.jpg" , SCREAMING_SNAKE_CASE__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"Success {index+1}/{len(SCREAMING_SNAKE_CASE__ )} with {file_name}" ) __UpperCAmelCase : Tuple = [] for anno in new_annos[index]: __UpperCAmelCase : Any = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(SCREAMING_SNAKE_CASE__ ) with open(f"/{file_root}.txt" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[list, list]: '''simple docstring''' __UpperCAmelCase : Any = [] __UpperCAmelCase : Tuple = [] for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE__ , '''*.txt''' ) ): __UpperCAmelCase : str = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(SCREAMING_SNAKE_CASE__ ) as in_file: __UpperCAmelCase : Tuple = in_file.readlines() __UpperCAmelCase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , f"{label_name}.jpg" ) __UpperCAmelCase : Optional[Any] = [] for obj_list in obj_lists: __UpperCAmelCase : int = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(SCREAMING_SNAKE_CASE__ ) labels.append(SCREAMING_SNAKE_CASE__ ) return img_paths, labels def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = 1 ) -> tuple[list, list, list]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[str] = [] for idx in range(len(SCREAMING_SNAKE_CASE__ ) ): __UpperCAmelCase : str = [] __UpperCAmelCase : Any = img_list[idx] path_list.append(SCREAMING_SNAKE_CASE__ ) __UpperCAmelCase : List[Any] = anno_list[idx] __UpperCAmelCase : str = cva.imread(SCREAMING_SNAKE_CASE__ ) if flip_type == 1: __UpperCAmelCase : Optional[int] = cva.flip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for bbox in img_annos: __UpperCAmelCase : Any = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __UpperCAmelCase : Any = cva.flip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for bbox in img_annos: __UpperCAmelCase : Any = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(SCREAMING_SNAKE_CASE__ ) new_imgs_list.append(SCREAMING_SNAKE_CASE__ ) return new_imgs_list, new_annos_lists, path_list def __SCREAMING_SNAKE_CASE ( lowercase_ = 32 ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" __UpperCAmelCase : Tuple = ascii_lowercase + digits return 
"".join(random.choice(SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
462
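The flip-augmentation record above mirrors images with cva.flip and updates the YOLO-format labels to match. The coordinate update is one line per box; a minimal sketch, assuming boxes are [class_id, x_center, y_center, width, height] normalised to [0, 1] (helper name is illustrative):

def flip_boxes(boxes, flip_type):
    # flip_type follows the cv2.flip convention: 1 = horizontal, 0 = vertical.
    flipped = []
    for cls, xc, yc, w, h in boxes:
        if flip_type == 1:
            flipped.append([cls, 1 - xc, yc, w, h])  # mirror x across the centre
        else:
            flipped.append([cls, xc, 1 - yc, w, h])  # mirror y across the centre
    return flipped

# flip_boxes([[0, 0.25, 0.5, 0.1, 0.2]], flip_type=1)
# -> [[0, 0.75, 0.5, 0.1, 0.2]]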
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCamelCase : List[str] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = ['''BartphoTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys _lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
663
0
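The BartPho record above uses transformers' _LazyModule to defer the sentencepiece-backed tokenizer import until first attribute access. The same effect can be sketched with a PEP 562 module-level __getattr__ — a simplified stand-in for illustration, not the transformers implementation:

import importlib

# Placed at the bottom of a package's __init__.py; names are illustrative.
_IMPORT_STRUCTURE = {"tokenization_bartpho": ["BartphoTokenizer"]}

def __getattr__(name):
    # Called only when `name` is not found normally, so the heavy
    # submodule is imported lazily on first attribute access.
    for module_name, exported in _IMPORT_STRUCTURE.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")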
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class _lowerCamelCase ( __lowerCamelCase ): """simple docstring""" snake_case = "Wav2Vec2FeatureExtractor" snake_case = "AutoTokenizer" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->List[str]: '''simple docstring''' super().__init__(_UpperCAmelCase , _UpperCAmelCase ) A_ : Tuple = self.feature_extractor A_ : Union[str, Any] = False @classmethod def _snake_case ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' try: return super().from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) except OSError: warnings.warn( F'''Loading a tokenizer inside {cls.__name__} from a config that does not''' ''' include a `tokenizer_class` attribute is deprecated and will be ''' '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`''' ''' attribute to either your `config.json` or `tokenizer_config.json` ''' '''file to suppress this warning: ''' , _UpperCAmelCase , ) A_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) A_ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) return cls(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Optional[Any]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) A_ : Tuple = kwargs.pop('''raw_speech''' ) else: A_ : Dict = kwargs.pop('''audio''' , _UpperCAmelCase ) A_ : Any = kwargs.pop('''sampling_rate''' , _UpperCAmelCase ) A_ : Dict = kwargs.pop('''text''' , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: A_ : Optional[Any] = args[0] A_ : Tuple = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: A_ : Dict = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None: A_ : Tuple = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: A_ : List[str] = encodings["input_ids"] return inputs def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*_UpperCAmelCase , **_UpperCAmelCase ) A_ : Optional[Any] = kwargs.pop('''input_features''' , _UpperCAmelCase ) A_ : Optional[int] = kwargs.pop('''labels''' , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: A_ : Optional[Any] = args[0] A_ : Dict = args[1:] if input_features is not None: A_ : Dict = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) if labels is not None: A_ : str = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase ) if labels is None: return input_features elif input_features is None: return labels else: A_ : List[str] = labels["input_ids"] return input_features def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' return self.tokenizer.batch_decode(*_UpperCAmelCase , 
**_UpperCAmelCase ) def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->int: '''simple docstring''' return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @contextmanager def _snake_case ( self )->Any: '''simple docstring''' warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) A_ : Dict = True A_ : int = self.tokenizer yield A_ : Optional[int] = self.feature_extractor A_ : Optional[Any] = False
590
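The Wav2Vec2 record above (like the image processor earlier in this dump) follows one dispatch contract: route audio to the feature extractor, text to the tokenizer, and attach the tokenised ids as labels when both are given. A stripped-down sketch of that contract — a hypothetical class, not the library code:

class MiniProcessor:
    def __init__(self, feature_extractor, tokenizer):
        # Both components are callables returning dict-like encodings.
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer

    def __call__(self, audio=None, text=None, **kwargs):
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        inputs = self.feature_extractor(audio, **kwargs) if audio is not None else None
        encodings = self.tokenizer(text, **kwargs) if text is not None else None
        if text is None:
            return inputs
        if audio is None:
            return encodings
        inputs["labels"] = encodings["input_ids"]  # paired input: text ids become labels
        return inputs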
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class lowerCamelCase : """simple docstring""" def __init__( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : Union[str, Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=3, _UpperCAmelCase : str=1_6, _UpperCAmelCase : Tuple=[1, 2, 1], _UpperCAmelCase : List[str]=[2, 2, 4], _UpperCAmelCase : Tuple=2, _UpperCAmelCase : str=2.0, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=0.0, _UpperCAmelCase : Any=0.0, _UpperCAmelCase : Optional[int]=0.1, _UpperCAmelCase : int="gelu", _UpperCAmelCase : Any=False, _UpperCAmelCase : Any=True, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Any=1E-5, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : List[Any]=None, _UpperCAmelCase : str=True, _UpperCAmelCase : Union[str, Any]=1_0, _UpperCAmelCase : List[str]=8, _UpperCAmelCase : Union[str, Any]=["stage1", "stage2", "stage3"], _UpperCAmelCase : Any=[1, 2, 3], ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = image_size SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = num_channels SCREAMING_SNAKE_CASE__ : Optional[int] = embed_dim SCREAMING_SNAKE_CASE__ : List[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = num_heads SCREAMING_SNAKE_CASE__ : str = window_size SCREAMING_SNAKE_CASE__ : Any = mlp_ratio SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Tuple = drop_path_rate SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = use_absolute_embeddings SCREAMING_SNAKE_CASE__ : Tuple = patch_norm SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range SCREAMING_SNAKE_CASE__ : List[str] = is_training SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_stride SCREAMING_SNAKE_CASE__ : List[Any] = out_features SCREAMING_SNAKE_CASE__ : Dict = out_indices def A_ ( self : List[str] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : str = None if self.use_labels: SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config() return config, pixel_values, labels def A_ ( self : Optional[int] ) -> List[str]: """simple docstring""" return MaskFormerSwinConfig( image_size=self.image_size, patch_size=self.patch_size, 
num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def A_ ( self : Dict, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) ) def A_ ( self : Optional[int], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Any, _UpperCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskFormerSwinBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ), [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ), len(config.out_features ) ) self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(_UpperCAmelCase ): SCREAMING_SNAKE_CASE__ : List[Any] = ["stem"] SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_UpperCAmelCase ) def A_ ( self : str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) UpperCAmelCase_ = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = MaskFormerSwinModelTester(self ) SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self, config_class=_UpperCAmelCase, embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def A_ ( self : Any ) -> List[Any]: """simple docstring""" pass def A_ ( self : Tuple ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ ( self : int ) -> Optional[Any]: """simple docstring""" return def A_ ( self : Any ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def A_ ( self : List[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) @unittest.skip("Swin does not use inputs_embeds" ) def A_ ( self : Any ) -> Optional[int]: """simple docstring""" pass @unittest.skip("Swin does not support feedforward chunking" ) def A_ ( self : List[str] ) -> Optional[int]: """simple docstring""" pass def A_ ( self : Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase, nn.Linear ) ) def A_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1], _UpperCAmelCase ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def A_ ( self : Dict ) -> List[str]: """simple docstring""" pass def A_ ( self : List[str], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ), _UpperCAmelCase ) # Swin has a different seq_length SCREAMING_SNAKE_CASE__ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], ) def A_ ( self : 
Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : int = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) def A_ ( self : Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Optional[int] = 3 SCREAMING_SNAKE_CASE__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) SCREAMING_SNAKE_CASE__ : str = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) SCREAMING_SNAKE_CASE__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Any = True self.check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, (padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def A_ ( self : List[Any] ) -> Dict: """simple docstring""" pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def A_ ( self : Dict ) -> str: """simple docstring""" pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def A_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" pass def A_ ( self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(_UpperCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE__ : Dict = 0 return t def check_equivalence(_UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any]={} ): with torch.no_grad(): SCREAMING_SNAKE_CASE__ : str = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = model(**_UpperCAmelCase, return_dict=_UpperCAmelCase, **_UpperCAmelCase ).to_tuple() def recursive_check(_UpperCAmelCase : int, _UpperCAmelCase : Dict ): if isinstance(_UpperCAmelCase, (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_UpperCAmelCase, _UpperCAmelCase ): recursive_check(_UpperCAmelCase, _UpperCAmelCase ) elif isinstance(_UpperCAmelCase, _UpperCAmelCase ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): 
recursive_check(_UpperCAmelCase, _UpperCAmelCase ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(_UpperCAmelCase ), set_nan_tensor_to_zero(_UpperCAmelCase ), atol=1E-5 ), msg=( "Tuple and dict output are not equal. Difference:" F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' F''' {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}. Dict has''' F''' `nan`: {torch.isnan(_UpperCAmelCase ).any()} and `inf`: {torch.isinf(_UpperCAmelCase )}.''' ), ) recursive_check(_UpperCAmelCase, _UpperCAmelCase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase, return_labels=_UpperCAmelCase ) check_equivalence(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, {"output_hidden_states": True} ) @require_torch class lowerCamelCase (unittest.TestCase , __lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else () UpperCAmelCase_ = MaskFormerSwinConfig def A_ ( self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModelTester(self ) def A_ ( self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Any = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str = backbone_class(_UpperCAmelCase ) backbone.to(_UpperCAmelCase ) backbone.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps, _UpperCAmelCase ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels ): self.assertTrue(feature_map.shape[:2], (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True SCREAMING_SNAKE_CASE__ : Optional[int] = backbone(**_UpperCAmelCase, output_hidden_states=_UpperCAmelCase ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ), len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:], 
backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: SCREAMING_SNAKE_CASE__ : int = backbone(**_UpperCAmelCase, output_attentions=_UpperCAmelCase ) self.assertIsNotNone(outputs.attentions )
663
0
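The MaskFormerSwin test record above checks that return_dict=True and return_dict=False produce element-wise identical outputs, zeroing NaNs before comparing. The core recursion, extracted as a sketch — assumes torch tensors and that dict value order matches tuple order, as in the test itself:

import torch

def assert_outputs_equivalent(tuple_obj, dict_obj, atol=1e-5):
    if isinstance(tuple_obj, (list, tuple)):
        for t, d in zip(tuple_obj, dict_obj):
            assert_outputs_equivalent(t, d, atol)
    elif isinstance(tuple_obj, dict):
        for t, d in zip(tuple_obj.values(), dict_obj.values()):
            assert_outputs_equivalent(t, d, atol)
    elif tuple_obj is None:
        return
    else:
        # Zero NaNs so matching NaN positions do not fail allclose.
        t = torch.nan_to_num(tuple_obj, nan=0.0)
        d = torch.nan_to_num(dict_obj, nan=0.0)
        assert torch.allclose(t, d, atol=atol), "tuple and dict outputs differ"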
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
344
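A note on the args accesses in the conversion script above: argparse derives the attribute name from the option string by stripping leading dashes and mapping any remaining hyphens to underscores, so --txt2img_unclip must be read back as args.txt2img_unclip. A two-line check:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--txt2img_unclip", default="kakaobrain/karlo-v1-alpha")
args = parser.parse_args([])  # empty argv: fall back to the default
assert args.txt2img_unclip == "kakaobrain/karlo-v1-alpha"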
from ....configuration_utils import PretrainedConfig from ....utils import logging _lowerCamelCase : Any = logging.get_logger(__name__) # TODO: upload to AWS _lowerCamelCase : str = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class lowerCamelCase (__lowerCamelCase ): """simple docstring""" UpperCAmelCase_ = "retribert" def __init__( self : Optional[Any], _UpperCAmelCase : Dict=3_0_5_2_2, _UpperCAmelCase : List[str]=7_6_8, _UpperCAmelCase : Tuple=8, _UpperCAmelCase : Optional[Any]=1_2, _UpperCAmelCase : Union[str, Any]=3_0_7_2, _UpperCAmelCase : Dict="gelu", _UpperCAmelCase : Tuple=0.1, _UpperCAmelCase : str=0.1, _UpperCAmelCase : List[str]=5_1_2, _UpperCAmelCase : Optional[int]=2, _UpperCAmelCase : Dict=0.02, _UpperCAmelCase : Any=1E-12, _UpperCAmelCase : Dict=True, _UpperCAmelCase : Any=1_2_8, _UpperCAmelCase : int=0, **_UpperCAmelCase : List[str], ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=_UpperCAmelCase, **_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = vocab_size SCREAMING_SNAKE_CASE__ : str = hidden_size SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = hidden_act SCREAMING_SNAKE_CASE__ : int = intermediate_size SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : List[str] = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[Any] = share_encoders SCREAMING_SNAKE_CASE__ : int = projection_dim
663
0
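Configs like the RetriBERT one above are thin attribute containers keyed by a class-level model_type. A framework-free sketch of the pattern — an illustrative class, not the transformers base:

class MiniConfig:
    model_type = "retribert"  # mirrors the sample above; purely illustrative

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=8, **kwargs):
        # Every constructor argument becomes a plain attribute.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        for key, value in kwargs.items():
            setattr(self, key, value)

    def to_dict(self):
        return dict(self.__dict__)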
"""simple docstring""" import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __UpperCamelCase : @property def __a ( self ) -> Optional[Any]: return self.get_dummy_input() @property def __a ( self ) -> Any: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.""" ) def __a ( self , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , ) -> Dict: a : str = 4 a : Optional[Any] = 32 a : int = (32, 32) a : str = torch.manual_seed(0 ) a : Union[str, Any] = torch.device(_UpperCAmelCase ) a : str = (batch_size, num_channels) + sizes a : Tuple = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase ) a : List[str] = {"hidden_states": hidden_states} if include_temb: a : Optional[Any] = 128 a : Dict = randn_tensor((batch_size, temb_channels) , generator=_UpperCAmelCase , device=_UpperCAmelCase ) if include_res_hidden_states_tuple: a : List[str] = torch.manual_seed(1 ) a : str = (randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=_UpperCAmelCase ),) if include_encoder_hidden_states: a : Any = floats_tensor((batch_size, 32, 32) ).to(_UpperCAmelCase ) if include_skip_sample: a : List[str] = randn_tensor(((batch_size, 3) + sizes) , generator=_UpperCAmelCase , device=_UpperCAmelCase ) return dummy_input def __a ( self ) -> str: a : Union[str, Any] = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": a : Optional[int] = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) a : Dict = self.dummy_input return init_dict, inputs_dict def __a ( self , lowerCAmelCase__ ) -> Optional[Any]: a : int = self.prepare_init_args_and_inputs_for_common() a : Dict = self.block_class(**_UpperCAmelCase ) unet_block.to(_UpperCAmelCase ) unet_block.eval() with torch.no_grad(): a : Any = unet_block(**_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): a : Dict = output[0] self.assertEqual(output.shape , self.output_shape ) a : List[str] = output[0, -1, -3:, -3:] a : Any = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase ) assert torch_all_close(output_slice.flatten() , _UpperCAmelCase , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __a ( self ) -> Optional[Any]: a : Any = self.prepare_init_args_and_inputs_for_common() a : str = self.block_class(**_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() a : List[str] = model(**_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): a : str = output[0] a : Tuple = torch.device(_UpperCAmelCase ) a : Tuple = randn_tensor(output.shape , device=_UpperCAmelCase ) a : int = torch.nn.functional.mse_loss(_UpperCAmelCase , _UpperCAmelCase ) loss.backward()
633
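The UNet-block test above pins all randomness with seeded generators so its hard-coded output slices stay stable across runs. The pattern in isolation (assumes torch; the shape matches the dummy input in the test):

import torch

generator = torch.manual_seed(0)  # returns the (now seeded) default CPU generator
hidden_states = torch.randn((4, 32, 32, 32), generator=generator)

generator = torch.manual_seed(0)  # re-seed to replay the same draw
hidden_states_again = torch.randn((4, 32, 32, 32), generator=generator)

assert torch.equal(hidden_states, hidden_states_again)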
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCamelCase : int = False @skip_mps class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline UpperCAmelCase_ = False UpperCAmelCase_ = TEXT_TO_IMAGE_PARAMS UpperCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} ) UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def A_ ( cls : str ) -> Union[str, Any]: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def A_ ( cls : Tuple ) -> str: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def A_ ( self : Any ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4), layers_per_block=1, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, attention_head_dim=(2, 4), use_linear_projection=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=_UpperCAmelCase, set_alpha_to_one=_UpperCAmelCase, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Any = AutoencoderKL( block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=1_2_8, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act="gelu", projection_dim=5_1_2, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextModel(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def A_ ( self : Optional[Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any=0 ) -> Optional[Any]: """simple docstring""" if str(_UpperCAmelCase ).startswith("mps" ): SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_UpperCAmelCase ) else: SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : str = { "prompt": "a cat and a frog", "token_indices": [2, 5], "generator": generator, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", "max_iter_to_alter": 2, "thresholds": {0: 0.7}, } return inputs def A_ ( self : 
Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = "cpu" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pipeline_class(**_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = pipe(**_UpperCAmelCase ).images SCREAMING_SNAKE_CASE__ : int = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 6_4, 6_4, 3) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) SCREAMING_SNAKE_CASE__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_UpperCAmelCase, 1E-3 ) def A_ ( self : str ) -> List[Any]: """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def A_ ( self : Any ) -> str: """simple docstring""" # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self : Optional[Any] ) -> str: """simple docstring""" self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 ) def A_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def A_ ( self : Any ) -> List[str]: """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class lowerCamelCase (unittest.TestCase ): """simple docstring""" @classmethod def A_ ( cls : Union[str, Any] ) -> Tuple: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) @classmethod def A_ ( cls : List[str] ) -> List[str]: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_UpperCAmelCase ) def A_ ( self : str ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(5_1 ) SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionAttendAndExcitePipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=_UpperCAmelCase, torch_dtype=torch.floataa ) pipe.to("cuda" ) SCREAMING_SNAKE_CASE__ : List[str] = "a painting of an elephant with glasses" SCREAMING_SNAKE_CASE__ : Optional[int] = [5, 7] SCREAMING_SNAKE_CASE__ : str = pipe( prompt=_UpperCAmelCase, token_indices=_UpperCAmelCase, guidance_scale=7.5, generator=_UpperCAmelCase, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0] SCREAMING_SNAKE_CASE__ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" ) assert np.abs((expected_image - image).max() ) < 5E-1
663
0
'''simple docstring''' from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class __lowerCAmelCase : '''simple docstring''' pass
665
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = PegasusConfig UpperCAmelCase_ = {} UpperCAmelCase_ = "gelu" def __init__( self : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : int=7, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=False, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Optional[Any]=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : Tuple=4, _UpperCAmelCase : str=3_7, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : List[str]=0.1, _UpperCAmelCase : Dict=4_0, _UpperCAmelCase : Any=2, _UpperCAmelCase : int=1, _UpperCAmelCase : str=0, ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[Any] = seq_length SCREAMING_SNAKE_CASE__ : int = is_training SCREAMING_SNAKE_CASE__ : int = use_labels SCREAMING_SNAKE_CASE__ : Tuple = vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : str = eos_token_id SCREAMING_SNAKE_CASE__ : Dict = pad_token_id SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id def A_ ( self : int ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1 ) SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) SCREAMING_SNAKE_CASE__ : List[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) return config, inputs_dict def A_ ( self : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 
TFPegasusModel(config=_UpperCAmelCase ).get_decoder() SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["input_ids"] SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :] SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["attention_mask"][:1, :] SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs_dict["head_mask"] SCREAMING_SNAKE_CASE__ : int = 1 # first forward pass SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, head_mask=_UpperCAmelCase, use_cache=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3), config.vocab_size ) SCREAMING_SNAKE_CASE__ : str = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([input_ids, next_tokens], axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.concat([attention_mask, next_attn_mask], axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE__ : Optional[Any] = int(ids_tensor((1,), output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-3 ) def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=None , ) -> Any: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () UpperCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase_ = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": 
TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase_ = True UpperCAmelCase_ = False UpperCAmelCase_ = False def A_ ( self : List[str] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = TFPegasusModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=_UpperCAmelCase ) def A_ ( self : Optional[Any] ) -> int: """simple docstring""" self.config_tester.run_common_tests() def A_ ( self : str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class lowerCamelCase (unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" ", ] UpperCAmelCase_ = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers UpperCAmelCase_ = "google/pegasus-xsum" @cached_property def A_ ( self : Union[str, Any] ) -> int: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def A_ ( self : str, **_UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.translate_src_text(**_UpperCAmelCase ) assert self.expected_text == generated_words def A_ ( self : Any, **_UpperCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(self.src_text, **_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors="tf" ) SCREAMING_SNAKE_CASE__ : List[str] = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def A_ ( self : List[Any] ) -> Any: """simple docstring""" self._assert_generated_batch_equal_expected()
663
0
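prepare_pegasus_inputs_dict above derives attention masks by comparing ids against the pad token (the corpus renders tf.int8 as tf.inta). The one-liner standalone, assuming tensorflow:

import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 7, 9, 0, 0]])
# 1 for real tokens, 0 wherever the id equals pad_token_id.
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
# -> [[1, 1, 1, 0, 0]]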
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=__lowerCamelCase ) class __a ( __lowerCamelCase ): __snake_case : Dict = field(default="""automatic-speech-recognition""" ,metadata={"""include_in_asdict_even_if_is_default""": True} ) __snake_case : Dict = Features({"""audio""": Audio()} ) __snake_case : int = Features({"""transcription""": Value("""string""" )} ) __snake_case : str = """audio""" __snake_case : int = """transcription""" def A ( self : Dict , UpperCAmelCase : int ): if self.audio_column not in features: raise ValueError(F'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column] , _UpperCAmelCase ): raise ValueError(F'Column {self.audio_column} is not an Audio type.' ) lowerCAmelCase_ : str = copy.deepcopy(self ) lowerCAmelCase_ : Tuple = self.input_schema.copy() lowerCAmelCase_ : Dict = features[self.audio_column] lowerCAmelCase_ : List[str] = input_schema return task_template @property def A ( self : Union[str, Any] ): return {self.audio_column: "audio", self.transcription_column: "transcription"}
600
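The task template above only validates and renames dataset columns. Its column_mapping contract can be sketched with a plain dataclass, independent of the datasets library (class name is illustrative):

from dataclasses import dataclass

@dataclass(frozen=True)
class MiniASRTemplate:
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    @property
    def column_mapping(self) -> dict:
        # Maps the dataset's actual column names onto the task's canonical ones.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}

# MiniASRTemplate("speech", "text").column_mapping
# -> {"speech": "audio", "text": "transcription"}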
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowerCamelCase : List[str] = { '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''], '''tokenization_tapas''': ['''TapasTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : List[Any] = [ '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TapasForMaskedLM''', '''TapasForQuestionAnswering''', '''TapasForSequenceClassification''', '''TapasModel''', '''TapasPreTrainedModel''', '''load_tf_weights_in_tapas''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Dict = [ '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFTapasForMaskedLM''', '''TFTapasForQuestionAnswering''', '''TFTapasForSequenceClassification''', '''TFTapasModel''', '''TFTapasPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys _lowerCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
663
0
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

_lowerCamelCase : Dict = random.Random()


def _lowerCAmelCase ( __magic_name__ :Any , __magic_name__ :Tuple=1.0 , __magic_name__ :List[Any]=None , __magic_name__ :List[Any]=None ):
    if rng is None:
        UpperCAmelCase_ = global_rng
    UpperCAmelCase_ = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


@require_torch
class snake_case__ ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Tuple=4_00 , lowerCAmelCase_ : Any=20_00 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : Optional[Any]=1_60_00 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=80 , lowerCAmelCase_ : Dict=16 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : int="hann_window" , lowerCAmelCase_ : int=80 , lowerCAmelCase_ : List[str]=76_00 , lowerCAmelCase_ : Optional[int]=1e-10 , lowerCAmelCase_ : Optional[int]=True , ) -> Any:
        UpperCAmelCase_ = parent
        UpperCAmelCase_ = batch_size
        UpperCAmelCase_ = min_seq_length
        UpperCAmelCase_ = max_seq_length
        UpperCAmelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCAmelCase_ = feature_size
        UpperCAmelCase_ = padding_value
        UpperCAmelCase_ = sampling_rate
        UpperCAmelCase_ = do_normalize
        UpperCAmelCase_ = num_mel_bins
        UpperCAmelCase_ = hop_length
        UpperCAmelCase_ = win_length
        UpperCAmelCase_ = win_function
        UpperCAmelCase_ = fmin
        UpperCAmelCase_ = fmax
        UpperCAmelCase_ = mel_floor
        UpperCAmelCase_ = return_attention_mask

    def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def UpperCamelCase ( self : Dict , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Any=False ) -> int:
        def _flatten(lowerCAmelCase_ : Any ):
            return list(itertools.chain(*_UpperCAmelCase ) )

        if equal_length:
            UpperCAmelCase_ = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            UpperCAmelCase_ = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            UpperCAmelCase_ = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]

        return speech_inputs

    def UpperCamelCase ( self : Any , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=False ) -> Optional[Any]:
        if equal_length:
            UpperCAmelCase_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            UpperCAmelCase_ = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            UpperCAmelCase_ = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]

        return speech_inputs


@require_torch
class snake_case__ ( __lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''

    __A = SpeechTaFeatureExtractor

    def UpperCamelCase ( self : Dict ) -> Any:
        UpperCAmelCase_ = SpeechTaFeatureExtractionTester(self )

    def UpperCamelCase ( self : Dict , lowerCAmelCase_ : int ) -> Union[str, Any]:
        self.assertTrue(np.all(np.mean(_UpperCAmelCase , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(_UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )

    def UpperCamelCase ( self : Dict ) -> str:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        UpperCAmelCase_ = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]

        # Test not batched input
        UpperCAmelCase_ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase_ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )

        # Test batched
        UpperCAmelCase_ = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
        UpperCAmelCase_ = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
            self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )

    def UpperCamelCase ( self : Dict ) -> List[Any]:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        UpperCAmelCase_ = ["longest", "max_length", "do_not_pad"]
        UpperCAmelCase_ = [None, 16_00, None]
        for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
            UpperCAmelCase_ = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='''np''' )
            UpperCAmelCase_ = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:8_00] )
            self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:10_00] )
            self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:12_00] )

    def UpperCamelCase ( self : List[Any] ) -> List[Any]:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase_ = range(8_00 , 14_00 , 2_00 )
        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in lengths]
        UpperCAmelCase_ = ["longest", "max_length", "do_not_pad"]
        UpperCAmelCase_ = [None, 16_00, None]
        for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ):
            UpperCAmelCase_ = feat_extract(_UpperCAmelCase , max_length=_UpperCAmelCase , padding=_UpperCAmelCase )
            UpperCAmelCase_ = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:8_00] )
            self._check_zero_mean_unit_variance(input_values[1][:10_00] )
            self._check_zero_mean_unit_variance(input_values[2][:12_00] )

    def UpperCamelCase ( self : str ) -> Optional[Any]:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        UpperCAmelCase_ = feat_extract(
            _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10_00 , padding='''max_length''' , return_tensors='''np''' )
        UpperCAmelCase_ = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :8_00] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        UpperCAmelCase_ = feat_extract(
            _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10_00 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase_ = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :8_00] )
        self._check_zero_mean_unit_variance(input_values[1, :10_00] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 10_00) )

        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        UpperCAmelCase_ = feat_extract(
            _UpperCAmelCase , truncation=_UpperCAmelCase , max_length=20_00 , padding='''longest''' , return_tensors='''np''' )
        UpperCAmelCase_ = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :8_00] )
        self._check_zero_mean_unit_variance(input_values[1, :10_00] )
        self._check_zero_mean_unit_variance(input_values[2] )

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 12_00) )

    def UpperCamelCase ( self : str ) -> Tuple:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCAmelCase_ = np.random.rand(1_00 ).astype(np.floataa )
        UpperCAmelCase_ = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCAmelCase_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCAmelCase_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        UpperCAmelCase_ = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]

        # Test feature size
        UpperCAmelCase_ = feature_extractor(audio_target=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''np''' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )

        # Test not batched input
        UpperCAmelCase_ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
        UpperCAmelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )

        # Test batched
        UpperCAmelCase_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
        UpperCAmelCase_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
            self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        UpperCAmelCase_ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        UpperCAmelCase_ = np.asarray(_UpperCAmelCase )
        UpperCAmelCase_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
        UpperCAmelCase_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
            self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) )

    def UpperCamelCase ( self : Tuple ) -> Tuple:
        UpperCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase_ = feat_extract.model_input_names[0]
        UpperCAmelCase_ = BatchFeature({input_name: speech_inputs} )

        self.assertTrue(all(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) for x, y in zip(_UpperCAmelCase , processed_features[input_name] ) ) )

        UpperCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase )
        UpperCAmelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )

        UpperCAmelCase_ = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase_ = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def UpperCamelCase ( self : Union[str, Any] ) -> int:
        UpperCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCAmelCase )
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase_ = feat_extract.model_input_names[0]
        UpperCAmelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )

        UpperCAmelCase_ = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            UpperCAmelCase_ = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )

    @require_torch
    def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        UpperCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict )
        UpperCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase_ = feat_extract.model_input_names[0]
        UpperCAmelCase_ = BatchFeature({input_name: speech_inputs} )

        UpperCAmelCase_ = feat_extract.num_mel_bins  # hack!

        UpperCAmelCase_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
        UpperCAmelCase_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]

        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )

    def UpperCamelCase ( self : List[Any] ) -> str:
        UpperCAmelCase_ = self.feat_extract_dict
        UpperCAmelCase_ = True
        UpperCAmelCase_ = self.feature_extraction_class(**_UpperCAmelCase )
        UpperCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase_ = [len(_UpperCAmelCase ) for x in speech_inputs]
        UpperCAmelCase_ = feat_extract.model_input_names[0]
        UpperCAmelCase_ = BatchFeature({input_name: speech_inputs} )

        UpperCAmelCase_ = feat_extract.num_mel_bins  # hack!

        UpperCAmelCase_ = feat_extract.pad(_UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _UpperCAmelCase )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCAmelCase )

    def UpperCamelCase ( self : str ) -> Dict:
        UpperCAmelCase_ = self.feat_extract_dict
        UpperCAmelCase_ = True
        UpperCAmelCase_ = self.feature_extraction_class(**_UpperCAmelCase )
        UpperCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_target()
        UpperCAmelCase_ = [len(_UpperCAmelCase ) for x in speech_inputs]
        UpperCAmelCase_ = feat_extract.model_input_names[0]
        UpperCAmelCase_ = BatchFeature({input_name: speech_inputs} )
        UpperCAmelCase_ = min(_UpperCAmelCase )

        UpperCAmelCase_ = feat_extract.num_mel_bins  # hack!

        UpperCAmelCase_ = feat_extract.pad(
            _UpperCAmelCase , padding='''max_length''' , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , _UpperCAmelCase )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )

    def UpperCamelCase ( self : str , lowerCAmelCase_ : Optional[int] ) -> int:
        from datasets import load_dataset

        UpperCAmelCase_ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        UpperCAmelCase_ = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def UpperCamelCase ( self : Dict ) -> List[Any]:
        UpperCAmelCase_ = torch.tensor(
            [2.38_04e-03, 2.07_52e-03, 1.98_36e-03, 2.10_57e-03, 1.61_74e-03, 3.05_18e-04, 9.15_53e-05, 3.35_69e-04,
             9.76_56e-04, 1.83_11e-03, 2.01_42e-03, 2.10_57e-03, 1.73_95e-03, 4.57_76e-04, -3.96_73e-04, 4.57_76e-04,
             1.00_71e-03, 9.15_53e-05, 4.88_28e-04, 1.15_97e-03, 7.32_42e-04, 9.46_04e-04, 1.80_05e-03, 1.83_11e-03,
             8.85_01e-04, 4.27_25e-04, 4.88_28e-04, 7.32_42e-04, 1.09_86e-03, 2.10_57e-03] )
        # fmt: on

        UpperCAmelCase_ = self._load_datasamples(1 )
        UpperCAmelCase_ = SpeechTaFeatureExtractor()
        UpperCAmelCase_ = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 9_36_80) )
        self.assertTrue(torch.allclose(input_values[0, :30] , _UpperCAmelCase , atol=1e-6 ) )

    def UpperCamelCase ( self : int ) -> Any:
        UpperCAmelCase_ = torch.tensor(
            [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777, -3.1_520, -2.9_435,
             -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386, -3.0_864, -3.1_291, -3.2_353, -2.7_444,
             -2.6_831, -2.7_287, -3.1_761, -3.1_571, -3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
        # fmt: on

        UpperCAmelCase_ = self._load_datasamples(1 )
        UpperCAmelCase_ = SpeechTaFeatureExtractor()
        UpperCAmelCase_ = feature_extractor(audio_target=_UpperCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 3_66, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCAmelCase , atol=1e-4 ) )
121
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
    raise Exception('''requires fairseq >= 1.0.0a''')

logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)

_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''


def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool ) -> Dict:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
    roberta.eval()  # disable dropout
    SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.sentence_encoder
    SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,
        hidden_size=roberta.cfg.model.encoder_embed_dim ,
        num_hidden_layers=roberta.cfg.model.encoder_layers ,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads ,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,
        max_position_embeddings=5_14 ,
        type_vocab_size=1 ,
        layer_norm_eps=1E-5 ,
    )
    if classification_head:
        SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:" , SCREAMING_SNAKE_CASE__ )

    SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE__ )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.embed_tokens.weight
    SCREAMING_SNAKE_CASE__ : str = roberta_sent_encoder.embed_positions.weight
    SCREAMING_SNAKE_CASE__ : List[Any] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    SCREAMING_SNAKE_CASE__ : Any = roberta_sent_encoder.layer_norm.weight
    SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
        SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
        SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn_layer_norm.weight
        SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn_layer_norm.bias

        # self attention
        SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )

        SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.self_attn.q_proj.weight
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.q_proj.bias
        SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight
        SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
        SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.weight
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight
        SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.weight
        SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.final_layer_norm.bias

        # intermediate
        SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.bias

        # output
        SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.fca.weight
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        SCREAMING_SNAKE_CASE__ : str = roberta.model.classification_heads["mnli"].dense.weight
        SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"].dense.bias
        SCREAMING_SNAKE_CASE__ : int = roberta.model.classification_heads["mnli"].out_proj.weight
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.dense.weight
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
        SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.layer_norm.weight
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
        SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.encoder.lm_head.weight
        SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )  # batch of size 1

    SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ )[0]
    if classification_head:
        SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads["mnli"](roberta.extract_features(SCREAMING_SNAKE_CASE__ ) )
    else:
        SCREAMING_SNAKE_CASE__ : int = roberta.model(SCREAMING_SNAKE_CASE__ )[0]
    print(our_output.shape , their_output.shape )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    SCREAMING_SNAKE_CASE__ : int = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
    print("Do both models output the same tensors?" , "🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )

    pathlib.Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    _lowerCamelCase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    _lowerCamelCase : Any = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
663
0
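The `_check_zero_mean_unit_variance` assertions in the row above verify per-utterance normalization of the padded region's prefix. A minimal sketch of one common way such normalization is computed (the helper name and epsilon below are our own, not from the file; the actual feature extractor may differ in details):

import numpy as np

def zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # normalize a single utterance so that mean ~ 0 and variance ~ 1,
    # which is what the tests' tolerance checks assert on
    return (x - x.mean()) / np.sqrt(x.var() + eps)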
'''simple docstring'''

import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


UpperCamelCase__: str = logging.get_logger(__name__)


def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=False ) -> Tuple:
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise

    if not is_sharded:
        UpperCAmelCase : int = os.path.abspath(SCREAMING_SNAKE_CASE__ )
        logger.info(f"""Loading PyTorch weights from {pt_path}""" )

        UpperCAmelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
        logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )

        UpperCAmelCase : List[Any] = convert_pytorch_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        UpperCAmelCase : Optional[Any] = convert_pytorch_sharded_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return flax_state_dict


def snake_case_ ( _lowerCAmelCase : Tuple[str] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, jnp.ndarray] , _lowerCAmelCase : str , ) -> (Tuple[str], np.ndarray):
    def is_key_or_prefix_key_in_dict(_lowerCAmelCase : Tuple[str] ) -> bool:
        return len(set(SCREAMING_SNAKE_CASE__ ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    UpperCAmelCase : Dict = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    UpperCAmelCase : Any = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    UpperCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    UpperCAmelCase : Any = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    UpperCAmelCase : int = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
        UpperCAmelCase : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    UpperCAmelCase : str = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
        UpperCAmelCase : int = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    UpperCAmelCase : int = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    UpperCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    UpperCAmelCase : Union[str, Any] = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        UpperCAmelCase : Any = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        UpperCAmelCase : str = pt_tuple_key[-2] + "_v"
    if name is not None:
        UpperCAmelCase : Dict = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ) -> List[Any]:
    UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    UpperCAmelCase : Optional[Any] = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        UpperCAmelCase : Dict = flax_model.params["params"]
    else:
        UpperCAmelCase : Optional[int] = flax_model.params
    UpperCAmelCase : Optional[int] = flatten_dict(SCREAMING_SNAKE_CASE__ )

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        UpperCAmelCase : Any = flatten_dict(flax_model.params['''batch_stats'''] )
        random_flax_state_dict.update(SCREAMING_SNAKE_CASE__ )

    UpperCAmelCase : Tuple = {}

    UpperCAmelCase : Tuple = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    UpperCAmelCase : Tuple = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        UpperCAmelCase : Tuple = tuple(pt_key.split('''.''' ) )

        # remove base model prefix if necessary
        UpperCAmelCase : Tuple = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCAmelCase : int = pt_tuple_key[1:]

        # Correctly rename weight parameters
        UpperCAmelCase : Any = rename_key_and_reshape_tensor(
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

        # add model prefix if necessary
        UpperCAmelCase : Dict = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCAmelCase : int = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                UpperCAmelCase : Any = jnp.asarray(SCREAMING_SNAKE_CASE__ )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                continue

            # also add unexpected weight so that warning is thrown
            UpperCAmelCase : List[str] = jnp.asarray(SCREAMING_SNAKE_CASE__ )
        else:
            # also add unexpected weight so that warning is thrown
            UpperCAmelCase : List[Any] = jnp.asarray(SCREAMING_SNAKE_CASE__ )

    return unflatten_dict(SCREAMING_SNAKE_CASE__ )


def snake_case_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ) -> int:
    import torch

    # Load the index
    UpperCAmelCase : Union[str, Any] = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        UpperCAmelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__ )
        UpperCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        UpperCAmelCase : List[Any] = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            UpperCAmelCase : List[str] = flax_model.params["params"]
            UpperCAmelCase : List[Any] = flatten_dict(SCREAMING_SNAKE_CASE__ )
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
        else:
            UpperCAmelCase : Any = flax_model.params
            UpperCAmelCase : List[str] = flatten_dict(SCREAMING_SNAKE_CASE__ )

        UpperCAmelCase : Tuple = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        UpperCAmelCase : List[str] = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            UpperCAmelCase : str = tuple(pt_key.split('''.''' ) )

            # remove base model prefix if necessary
            UpperCAmelCase : int = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                UpperCAmelCase : int = pt_tuple_key[1:]

            # Correctly rename weight parameters
            UpperCAmelCase : Dict = rename_key_and_reshape_tensor(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            # add model prefix if necessary
            UpperCAmelCase : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                UpperCAmelCase : List[Any] = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    UpperCAmelCase : str = jnp.asarray(SCREAMING_SNAKE_CASE__ )
                    continue
                if "var" in flax_key[-1]:
                    UpperCAmelCase : Tuple = jnp.asarray(SCREAMING_SNAKE_CASE__ )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                    continue

                # also add unexpected weight so that warning is thrown
                UpperCAmelCase : Tuple = jnp.asarray(SCREAMING_SNAKE_CASE__ )
            else:
                # also add unexpected weight so that warning is thrown
                UpperCAmelCase : str = jnp.asarray(SCREAMING_SNAKE_CASE__ )

    return unflatten_dict(SCREAMING_SNAKE_CASE__ )


def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ) -> Union[str, Any]:
    UpperCAmelCase : Tuple = os.path.abspath(SCREAMING_SNAKE_CASE__ )
    logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )

    # import correct flax class
    UpperCAmelCase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , '''Flax''' + model.__class__.__name__ )

    # load flax weight dict
    with open(SCREAMING_SNAKE_CASE__ , '''rb''' ) as state_f:
        try:
            UpperCAmelCase : Optional[Any] = from_bytes(SCREAMING_SNAKE_CASE__ , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )

    return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )


def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict ) -> List[str]:
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise

    # check if we have bf16 weights
    UpperCAmelCase : Any = flatten_dict(jax.tree_util.tree_map(lambda _lowerCAmelCase : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE__ ) ).values()
    if any(SCREAMING_SNAKE_CASE__ ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        UpperCAmelCase : Dict = jax.tree_util.tree_map(
            lambda _lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE__ )

    UpperCAmelCase : List[Any] = flatten_dict(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase : Union[str, Any] = pt_model.state_dict()

    UpperCAmelCase : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    UpperCAmelCase : Any = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    UpperCAmelCase : Optional[int] = []
    UpperCAmelCase : Union[str, Any] = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        UpperCAmelCase : Optional[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
        UpperCAmelCase : Dict = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            UpperCAmelCase : Union[str, Any] = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            UpperCAmelCase : Union[str, Any] = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
            # conv layer
            UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",)
            UpperCAmelCase : Union[str, Any] = jnp.transpose(SCREAMING_SNAKE_CASE__ , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
            # linear layer
            UpperCAmelCase : int = flax_key_tuple[:-1] + ("weight",)
            UpperCAmelCase : Dict = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            UpperCAmelCase : str = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            UpperCAmelCase : Optional[Any] = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            UpperCAmelCase : str = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            UpperCAmelCase : List[str] = ".".join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            UpperCAmelCase : Any = ".".join(SCREAMING_SNAKE_CASE__ )

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        UpperCAmelCase : List[str] = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            UpperCAmelCase : Dict = key.split('''.''' )
            UpperCAmelCase : Union[str, Any] = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                UpperCAmelCase : Union[str, Any] = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                UpperCAmelCase : str = key_components[-2] + "_v"
            if name is not None:
                UpperCAmelCase : List[Any] = key_components[:-3] + [name]
                UpperCAmelCase : str = ".".join(SCREAMING_SNAKE_CASE__ )
                UpperCAmelCase : Any = key

        if flax_key in special_pt_names:
            UpperCAmelCase : str = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                UpperCAmelCase : List[Any] = np.asarray(SCREAMING_SNAKE_CASE__ ) if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) else flax_tensor
                UpperCAmelCase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
                # remove from missing keys
                missing_keys.remove(SCREAMING_SNAKE_CASE__ )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(SCREAMING_SNAKE_CASE__ )

    pt_model.load_state_dict(SCREAMING_SNAKE_CASE__ )

    # re-transform missing_keys to list
    UpperCAmelCase : Dict = list(SCREAMING_SNAKE_CASE__ )

    if len(SCREAMING_SNAKE_CASE__ ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    else:
        logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )

    if len(SCREAMING_SNAKE_CASE__ ) > 0:
        logger.warning(
            f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            ''' use it for predictions and inference.''' )
    else:
        logger.warning(
            f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )

    return pt_model
127
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer


class lowerCamelCase (__lowerCamelCase ):
    """simple docstring"""
    UpperCAmelCase_ = "Wav2Vec2FeatureExtractor"
    UpperCAmelCase_ = "AutoTokenizer"

    def __init__( self : Tuple, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple ) -> List[str]:
        """simple docstring"""
        super().__init__(_UpperCAmelCase, _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Tuple = self.feature_extractor
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = False

    @classmethod
    def A_ ( cls : int, _UpperCAmelCase : Dict, **_UpperCAmelCase : List[Any] ) -> Optional[int]:
        """simple docstring"""
        try:
            return super().from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
        except OSError:
            warnings.warn(
                F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                _UpperCAmelCase,
            )

            SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
            SCREAMING_SNAKE_CASE__ : int = WavaVecaCTCTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )

            return cls(feature_extractor=_UpperCAmelCase, tokenizer=_UpperCAmelCase )

    def __call__( self : Optional[Any], *_UpperCAmelCase : int, **_UpperCAmelCase : str ) -> Optional[Any]:
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*_UpperCAmelCase, **_UpperCAmelCase )

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("raw_speech" )
        else:
            SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("audio", _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("sampling_rate", _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("text", _UpperCAmelCase )
        if len(_UpperCAmelCase ) > 0:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
            SCREAMING_SNAKE_CASE__ : Tuple = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )

        if audio is not None:
            SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_UpperCAmelCase, *_UpperCAmelCase, sampling_rate=_UpperCAmelCase, **_UpperCAmelCase )
        if text is not None:
            SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(_UpperCAmelCase, **_UpperCAmelCase )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE__ : List[str] = encodings["input_ids"]
            return inputs

    def A_ ( self : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*_UpperCAmelCase, **_UpperCAmelCase )

        SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("input_features", _UpperCAmelCase )
        SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop("labels", _UpperCAmelCase )
        if len(_UpperCAmelCase ) > 0:
            SCREAMING_SNAKE_CASE__ : Optional[Any] = args[0]
            SCREAMING_SNAKE_CASE__ : Dict = args[1:]

        if input_features is not None:
            SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor.pad(_UpperCAmelCase, *_UpperCAmelCase, **_UpperCAmelCase )
        if labels is not None:
            SCREAMING_SNAKE_CASE__ : str = self.tokenizer.pad(_UpperCAmelCase, **_UpperCAmelCase )

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            SCREAMING_SNAKE_CASE__ : List[str] = labels["input_ids"]
            return input_features

    def A_ ( self : Union[str, Any], *_UpperCAmelCase : str, **_UpperCAmelCase : Any ) -> Optional[int]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*_UpperCAmelCase, **_UpperCAmelCase )

    def A_ ( self : Optional[int], *_UpperCAmelCase : Tuple, **_UpperCAmelCase : Optional[int] ) -> int:
        """simple docstring"""
        return self.tokenizer.decode(*_UpperCAmelCase, **_UpperCAmelCase )

    @contextmanager
    def A_ ( self : Optional[int] ) -> Any:
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        SCREAMING_SNAKE_CASE__ : Dict = True
        SCREAMING_SNAKE_CASE__ : int = self.tokenizer
        yield
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor
        SCREAMING_SNAKE_CASE__ : Optional[Any] = False
663
0
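The two conversion directions in the row above reduce to a small set of rename-and-reshape rules: PyTorch stores `nn.Linear` weights as `(out, in)` matrices and conv weights as `(out, in, kh, kw)`, while Flax stores `kernel` arrays of shape `(in, out)` and `(kh, kw, in, out)` and calls the layer-norm weight `scale`. A standalone sketch of that leaf-level mapping (the helper name is ours, and the real code additionally consults the target Flax parameter dict before deciding which rule applies):

import numpy as np

def pt_to_flax_leaf(pt_key_tuple, pt_tensor):
    # illustrative only: rename one PyTorch state-dict entry to its Flax equivalent
    if pt_key_tuple[-1] == "weight" and pt_tensor.ndim == 4:
        # conv layer: (out, in, kh, kw) -> (kh, kw, in, out), stored as "kernel"
        return pt_key_tuple[:-1] + ("kernel",), np.transpose(pt_tensor, (2, 3, 1, 0))
    if pt_key_tuple[-1] == "weight" and pt_tensor.ndim == 2:
        # linear layer: (out, in) -> (in, out), stored as "kernel"
        return pt_key_tuple[:-1] + ("kernel",), pt_tensor.T
    if pt_key_tuple[-1] in ("gamma", "weight"):
        # 1-D layer-norm weight is called "scale" in Flax
        return pt_key_tuple[:-1] + ("scale",), pt_tensor
    return pt_key_tuple, pt_tensor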
'''simple docstring'''

from math import factorial, radians


def _lowercase ( lowerCamelCase__ : float, lowerCamelCase__ : int = 18, lowerCamelCase__ : int = 10 ):
    _a = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    _a = radians(SCREAMING_SNAKE_CASE__ )

    _a = angle_in_radians
    _a = 3
    _a = -1

    for _ in range(SCREAMING_SNAKE_CASE__ ):
        result += (b * (angle_in_radians**a)) / factorial(SCREAMING_SNAKE_CASE__ )

        _a = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    __import__("doctest").testmod()
131
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_lowerCamelCase : Tuple = {
    '''configuration_xlm_roberta_xl''': [
        '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaXLConfig''',
        '''XLMRobertaXLOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaXLForCausalLM''',
        '''XLMRobertaXLForMaskedLM''',
        '''XLMRobertaXLForMultipleChoice''',
        '''XLMRobertaXLForQuestionAnswering''',
        '''XLMRobertaXLForSequenceClassification''',
        '''XLMRobertaXLForTokenClassification''',
        '''XLMRobertaXLModel''',
        '''XLMRobertaXLPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    _lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
663
0
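For reference, the function in the row above evaluates the truncated Maclaurin series for sine: the angle is first reduced modulo 360 degrees and converted to radians, and the loop then adds one correction term per iteration, so with `accuracy` iterations the result is

\[
\sin(x) \approx \sum_{k=0}^{\text{accuracy}} \frac{(-1)^k}{(2k+1)!}\, x^{2k+1}
             = x - \frac{x^3}{3!} + \frac{x^5}{5!} - \cdots
\]

with \(x\) in radians.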
'''simple docstring'''

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


__SCREAMING_SNAKE_CASE = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''


def __a ( ):
    a__ : List[Any] = _ask_options(
        '''In which compute environment are you running?''' ,
        ['''This machine''', '''AWS (Amazon SageMaker)'''] ,
        _convert_compute_environment ,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        a__ : Any = get_sagemaker_input()
    else:
        a__ : Tuple = get_cluster_input()
    return config


def __a ( lowerCAmelCase__ : Optional[int]=None ):
    if subparsers is not None:
        a__ : Any = subparsers.add_parser('''config''' , description=SCREAMING_SNAKE_CASE__ )
    else:
        a__ : int = argparse.ArgumentParser('''Accelerate config command''' , description=SCREAMING_SNAKE_CASE__ )

    parser.add_argument(
        '''--config_file''' ,
        default=SCREAMING_SNAKE_CASE__ ,
        help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) ,
    )

    if subparsers is not None:
        parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
    return parser


def __a ( lowerCAmelCase__ : Union[str, Any] ):
    a__ : str = get_user_input()
    if args.config_file is not None:
        a__ : str = args.config_file
    else:
        if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
            os.makedirs(SCREAMING_SNAKE_CASE__ )
        a__ : Optional[Any] = default_yaml_config_file

    if config_file.endswith('''.json''' ):
        config.to_json_file(SCREAMING_SNAKE_CASE__ )
    else:
        config.to_yaml_file(SCREAMING_SNAKE_CASE__ )
    print(F'accelerate configuration saved at {config_file}' )


def __a ( ):
    a__ : str = config_command_parser()
    a__ : str = parser.parse_args()
    config_command(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    main()
688
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


_lowerCamelCase : str = logging.get_logger(__name__)

_lowerCamelCase : List[str] = '''▁'''

_lowerCamelCase : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}

_lowerCamelCase : Dict = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}

_lowerCamelCase : Optional[Any] = {'''vinai/bartpho-syllable''': 1_0_2_4}


class lowerCamelCase (__lowerCamelCase ):
    """simple docstring"""
    UpperCAmelCase_ = VOCAB_FILES_NAMES
    UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase_ = ["input_ids", "attention_mask"]

    def __init__( self : int, _UpperCAmelCase : Dict, _UpperCAmelCase : Tuple, _UpperCAmelCase : Any="<s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[str]="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Dict="<unk>", _UpperCAmelCase : Tuple="<pad>", _UpperCAmelCase : int="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : Any, ) -> None:
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE__ : Any = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token

        SCREAMING_SNAKE_CASE__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=_UpperCAmelCase,
            eos_token=_UpperCAmelCase,
            unk_token=_UpperCAmelCase,
            sep_token=_UpperCAmelCase,
            cls_token=_UpperCAmelCase,
            pad_token=_UpperCAmelCase,
            mask_token=_UpperCAmelCase,
            sp_model_kwargs=self.sp_model_kwargs,
            **_UpperCAmelCase,
        )

        SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
        SCREAMING_SNAKE_CASE__ : Optional[int] = monolingual_vocab_file
        SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_UpperCAmelCase ) )

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        SCREAMING_SNAKE_CASE__ : List[Any] = {}
        SCREAMING_SNAKE_CASE__ : Optional[int] = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
                SCREAMING_SNAKE_CASE__ : Dict = cnt
                cnt += 1
        with open(_UpperCAmelCase, "r", encoding="utf-8" ) as f:
            for line in f.readlines():
                SCREAMING_SNAKE_CASE__ : int = line.strip().split()[0]
                SCREAMING_SNAKE_CASE__ : Tuple = len(self.fairseq_tokens_to_ids )
        if str(_UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
            SCREAMING_SNAKE_CASE__ : List[Any] = len(self.fairseq_tokens_to_ids )

        SCREAMING_SNAKE_CASE__ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : Any ) -> int:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy()
        SCREAMING_SNAKE_CASE__ : Any = None
        SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : int, _UpperCAmelCase : Optional[int] ) -> int:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[Any] = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs" ):
            SCREAMING_SNAKE_CASE__ : List[Any] = {}

        SCREAMING_SNAKE_CASE__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id]
        SCREAMING_SNAKE_CASE__ : List[Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def A_ ( self : List[str], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )

        if token_ids_a is None:
            return [1] + ([0] * len(_UpperCAmelCase )) + [1]
        return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]

    def A_ ( self : Optional[int], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
        SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def A_ ( self : Any ) -> List[str]:
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens )

    def A_ ( self : Tuple ) -> str:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def A_ ( self : Tuple, _UpperCAmelCase : str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )

    def A_ ( self : List[str], _UpperCAmelCase : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def A_ ( self : List[str], _UpperCAmelCase : str ) -> str:
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]

    def A_ ( self : Optional[Any], _UpperCAmelCase : List[str] ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Optional[int] = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip()
        return out_string

    def A_ ( self : Tuple, _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(_UpperCAmelCase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
            _UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
            _UpperCAmelCase,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, _UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_UpperCAmelCase, "wb" ) as fi:
                SCREAMING_SNAKE_CASE__ : int = self.sp_model.serialized_model_proto()
                fi.write(_UpperCAmelCase )

        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            _UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file, _UpperCAmelCase )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(_UpperCAmelCase, "w", encoding="utf-8" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'''{str(_UpperCAmelCase )} \n''' )

        return out_vocab_file, out_monolingual_vocab_file
663
0
import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def lowerCAmelCase_ ( __A ) -> str: '''simple docstring''' random.seed(SCREAMING_SNAKE_CASE__ ) np.random.seed(SCREAMING_SNAKE_CASE__ ) torch.manual_seed(SCREAMING_SNAKE_CASE__ ) torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE__ ) # ^^ safe to call this function even if cuda is not available class A : def __init__(self : Tuple , __UpperCAmelCase : Iterable[torch.nn.Parameter] , __UpperCAmelCase : float = 0.9999 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Union[float, int] = 1.0 , __UpperCAmelCase : Union[float, int] = 2 / 3 , __UpperCAmelCase : Optional[Any] = None , __UpperCAmelCase : Dict[str, Any] = None , **__UpperCAmelCase : Tuple , ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , torch.nn.Module ): UpperCAmelCase__ = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , _UpperCAmelCase , standard_warn=_UpperCAmelCase , ) UpperCAmelCase__ = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility UpperCAmelCase__ = True if kwargs.get("max_value" , _UpperCAmelCase ) is not None: UpperCAmelCase__ = "The `max_value` argument is deprecated. Please use `decay` instead." deprecate("max_value" , "1.0.0" , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) UpperCAmelCase__ = kwargs["max_value"] if kwargs.get("min_value" , _UpperCAmelCase ) is not None: UpperCAmelCase__ = "The `min_value` argument is deprecated. Please use `min_decay` instead." deprecate("min_value" , "1.0.0" , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) UpperCAmelCase__ = kwargs["min_value"] UpperCAmelCase__ = list(_UpperCAmelCase ) UpperCAmelCase__ = [p.clone().detach() for p in parameters] if kwargs.get("device" , _UpperCAmelCase ) is not None: UpperCAmelCase__ = "The `device` argument is deprecated. Please use `to` instead." deprecate("device" , "1.0.0" , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) self.to(device=kwargs["device"] ) UpperCAmelCase__ = None UpperCAmelCase__ = decay UpperCAmelCase__ = min_decay UpperCAmelCase__ = update_after_step UpperCAmelCase__ = use_ema_warmup UpperCAmelCase__ = inv_gamma UpperCAmelCase__ = power UpperCAmelCase__ = 0 UpperCAmelCase__ = None # set in `step()` UpperCAmelCase__ = model_cls UpperCAmelCase__ = model_config @classmethod def lowercase_ (cls : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> "EMAModel": """simple docstring""" UpperCAmelCase__ = model_cls.load_config(_UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase ) UpperCAmelCase__ = model_cls.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ = cls(model.parameters() , model_cls=_UpperCAmelCase , model_config=model.config ) ema_model.load_state_dict(_UpperCAmelCase ) return ema_model def lowercase_ (self : Dict , __UpperCAmelCase : Any ) -> Tuple: """simple docstring""" if self.model_cls is None: raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." ) if self.model_config is None: raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." 
) UpperCAmelCase__ = self.model_cls.from_config(self.model_config ) UpperCAmelCase__ = self.state_dict() state_dict.pop("shadow_params" , _UpperCAmelCase ) model.register_to_config(**_UpperCAmelCase ) self.copy_to(model.parameters() ) model.save_pretrained(_UpperCAmelCase ) def lowercase_ (self : Any , __UpperCAmelCase : int ) -> float: """simple docstring""" UpperCAmelCase__ = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: UpperCAmelCase__ = 1 - (1 + step / self.inv_gamma) ** -self.power else: UpperCAmelCase__ = (1 + step) / (1_0 + step) UpperCAmelCase__ = min(_UpperCAmelCase , self.decay ) # make sure decay is not smaller than min_decay UpperCAmelCase__ = max(_UpperCAmelCase , self.min_decay ) return cur_decay_value @torch.no_grad() def lowercase_ (self : str , __UpperCAmelCase : Iterable[torch.nn.Parameter] ) -> int: """simple docstring""" if isinstance(_UpperCAmelCase , torch.nn.Module ): UpperCAmelCase__ = ( "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. " "Please pass the parameters of the module instead." ) deprecate( "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , _UpperCAmelCase , standard_warn=_UpperCAmelCase , ) UpperCAmelCase__ = parameters.parameters() UpperCAmelCase__ = list(_UpperCAmelCase ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. UpperCAmelCase__ = self.get_decay(self.optimization_step ) UpperCAmelCase__ = decay UpperCAmelCase__ = 1 - decay UpperCAmelCase__ = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , _UpperCAmelCase ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): UpperCAmelCase__ = deepspeed.zero.GatheredParameters(_UpperCAmelCase , modifier_rank=_UpperCAmelCase ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(_UpperCAmelCase ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : Iterable[torch.nn.Parameter] ) -> None: """simple docstring""" UpperCAmelCase__ = list(_UpperCAmelCase ) for s_param, param in zip(self.shadow_params , _UpperCAmelCase ): param.data.copy_(s_param.to(param.device ).data ) def lowercase_ (self : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Tuple=None ) -> None: """simple docstring""" UpperCAmelCase__ = [ p.to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) if p.is_floating_point() else p.to(device=_UpperCAmelCase ) for p in self.shadow_params ] def lowercase_ (self : Dict ) -> dict: """simple docstring""" return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def lowercase_ (self : Tuple , __UpperCAmelCase : Iterable[torch.nn.Parameter] ) -> None: """simple docstring""" UpperCAmelCase__ = [param.detach().cpu().clone() for param in parameters] def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Iterable[torch.nn.Parameter] ) -> None: """simple docstring""" if self.temp_stored_params is None: raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" ) for c_param, param in zip(self.temp_stored_params , _UpperCAmelCase ): param.data.copy_(c_param.data ) # 
Better memory-wise. UpperCAmelCase__ = None def lowercase_ (self : List[Any] , __UpperCAmelCase : dict ) -> None: """simple docstring""" UpperCAmelCase__ = copy.deepcopy(_UpperCAmelCase ) UpperCAmelCase__ = state_dict.get("decay" , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError("Decay must be between 0 and 1" ) UpperCAmelCase__ = state_dict.get("min_decay" , self.min_decay ) if not isinstance(self.min_decay , _UpperCAmelCase ): raise ValueError("Invalid min_decay" ) UpperCAmelCase__ = state_dict.get("optimization_step" , self.optimization_step ) if not isinstance(self.optimization_step , _UpperCAmelCase ): raise ValueError("Invalid optimization_step" ) UpperCAmelCase__ = state_dict.get("update_after_step" , self.update_after_step ) if not isinstance(self.update_after_step , _UpperCAmelCase ): raise ValueError("Invalid update_after_step" ) UpperCAmelCase__ = state_dict.get("use_ema_warmup" , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , _UpperCAmelCase ): raise ValueError("Invalid use_ema_warmup" ) UpperCAmelCase__ = state_dict.get("inv_gamma" , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError("Invalid inv_gamma" ) UpperCAmelCase__ = state_dict.get("power" , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError("Invalid power" ) UpperCAmelCase__ = state_dict.get("shadow_params" , _UpperCAmelCase ) if shadow_params is not None: UpperCAmelCase__ = shadow_params if not isinstance(self.shadow_params , _UpperCAmelCase ): raise ValueError("shadow_params must be a list" ) if not all(isinstance(_UpperCAmelCase , torch.Tensor ) for p in self.shadow_params ): raise ValueError("shadow_params must all be Tensors" )
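A minimal usage sketch of the EMA helper above. The class name `EMAModel` follows the return annotation on the row's own `from_pretrained` classmethod, but treat it (and the tiny linear model) as assumptions for illustration only:

import torch

model = torch.nn.Linear(4, 2)
ema = EMAModel(model.parameters(), decay=0.9999)

for _ in range(10):
    # ... an optimizer step on `model` would go here ...
    ema.step(model.parameters())   # pull the shadow weights toward the live ones

ema.store(model.parameters())      # stash the live weights
ema.copy_to(model.parameters())    # evaluate or checkpoint with the averaged weights
# ... validation would go here ...
ema.restore(model.parameters())    # put the live weights back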
486
from random import shuffle import tensorflow as tf from numpy import array def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = int(SCREAMING_SNAKE_CASE__ ) assert noofclusters < len(SCREAMING_SNAKE_CASE__ ) # Find out the dimensionality SCREAMING_SNAKE_CASE__ : List[Any] = len(vectors[0] ) # Will help select random centroids from among the available vectors SCREAMING_SNAKE_CASE__ : List[Any] = list(range(len(SCREAMING_SNAKE_CASE__ ) ) ) shuffle(SCREAMING_SNAKE_CASE__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. SCREAMING_SNAKE_CASE__ : Tuple = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION SCREAMING_SNAKE_CASE__ : List[Any] = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points SCREAMING_SNAKE_CASE__ : Any = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(SCREAMING_SNAKE_CASE__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float64" , [dim] ) SCREAMING_SNAKE_CASE__ : Dict = [] for centroid in centroids: cent_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) SCREAMING_SNAKE_CASE__ : Tuple = [tf.Variable(0 ) for i in range(len(SCREAMING_SNAKE_CASE__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value SCREAMING_SNAKE_CASE__ : Tuple = tf.placeholder("int32" ) SCREAMING_SNAKE_CASE__ : Tuple = [] for assignment in assignments: cluster_assigns.append(tf.assign(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input SCREAMING_SNAKE_CASE__ : int = tf.placeholder("float" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors SCREAMING_SNAKE_CASE__ : str = tf.reduce_mean(SCREAMING_SNAKE_CASE__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.placeholder("float" , [dim] ) SCREAMING_SNAKE_CASE__ : List[Any] = tf.placeholder("float" , [dim] ) SCREAMING_SNAKE_CASE__ : Dict = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.placeholder("float" , [noofclusters] ) SCREAMING_SNAKE_CASE__ : Tuple = tf.argmin(SCREAMING_SNAKE_CASE__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. SCREAMING_SNAKE_CASE__ : Tuple = tf.initialize_all_variables() # Initialize all variables sess.run(SCREAMING_SNAKE_CASE__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. 
To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. SCREAMING_SNAKE_CASE__ : Tuple = 1_00 for _ in range(SCREAMING_SNAKE_CASE__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(SCREAMING_SNAKE_CASE__ ) ): SCREAMING_SNAKE_CASE__ : Any = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. SCREAMING_SNAKE_CASE__ : Tuple = [ sess.run(SCREAMING_SNAKE_CASE__ , feed_dict={va: vect, va: sess.run(SCREAMING_SNAKE_CASE__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input SCREAMING_SNAKE_CASE__ : Any = sess.run( SCREAMING_SNAKE_CASE__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(SCREAMING_SNAKE_CASE__ ): # Collect all the vectors assigned to this cluster SCREAMING_SNAKE_CASE__ : Dict = [ vectors[i] for i in range(len(SCREAMING_SNAKE_CASE__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location SCREAMING_SNAKE_CASE__ : str = sess.run( SCREAMING_SNAKE_CASE__ , feed_dict={mean_input: array(SCREAMING_SNAKE_CASE__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments SCREAMING_SNAKE_CASE__ : int = sess.run(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = sess.run(SCREAMING_SNAKE_CASE__ ) return centroids, assignments
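A usage sketch for the clustering routine above, assumed here to be importable as `tf_k_means(vectors, noofclusters)` (the name is an assumption; the row's own definition is obfuscated). Note the routine targets pre-1.0 TensorFlow APIs (`tf.Session`, `tf.placeholder`, `tf.sub`), so it only runs on a correspondingly old install:

from numpy import array

# Two well-separated 2-D blobs, so k = 2 should recover them cleanly.
vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.5], [1.2, 0.8], [9.0, 8.2]])
centroids, assignments = tf_k_means(vectors, noofclusters=2)
print(centroids)    # two 2-D centroid locations
print(assignments)  # cluster index (0 or 1) for each input vector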
663
0
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase : def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=3_0 , lowercase__=2 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=3_2 , lowercase__=2 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1_0 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=None , ): __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : Union[str, Any] = image_size __UpperCAmelCase : int = patch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : Optional[int] = use_labels __UpperCAmelCase : Optional[Any] = hidden_size __UpperCAmelCase : List[Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_act __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : Optional[int] = attention_probs_dropout_prob __UpperCAmelCase : int = type_sequence_label_size __UpperCAmelCase : Dict = initializer_range __UpperCAmelCase : Optional[int] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2 __UpperCAmelCase : Optional[Any] = num_patches + 1 def A( self): __UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : List[str] = None if self.use_labels: __UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def A( self): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def A( self , lowercase__ , lowercase__ , lowercase__): __UpperCAmelCase : Any = TFViTModel(config=_UpperCAmelCase) __UpperCAmelCase : Dict = model(_UpperCAmelCase , training=_UpperCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # Test with an image with different size than the one specified in config. 
__UpperCAmelCase : Dict = self.image_size // 2 __UpperCAmelCase : Any = pixel_values[:, :, :image_size, :image_size] __UpperCAmelCase : List[str] = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase) __UpperCAmelCase : List[Any] = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size)) def A( self , lowercase__ , lowercase__ , lowercase__): __UpperCAmelCase : Optional[int] = self.type_sequence_label_size __UpperCAmelCase : Dict = TFViTForImageClassification(_UpperCAmelCase) __UpperCAmelCase : Dict = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # Test with an image with different size than the one specified in config. __UpperCAmelCase : Optional[Any] = self.image_size // 2 __UpperCAmelCase : Dict = pixel_values[:, :, :image_size, :image_size] __UpperCAmelCase : Optional[Any] = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase , training=_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : List[Any] = TFViTForImageClassification(_UpperCAmelCase) __UpperCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) __UpperCAmelCase : Optional[int] = model(_UpperCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def A( self): __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() __UpperCAmelCase : Union[str, Any] = config_and_inputs __UpperCAmelCase : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): _lowerCAmelCase : Dict = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () _lowerCAmelCase : List[Any] = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) _lowerCAmelCase : Optional[Any] = False _lowerCAmelCase : Optional[int] = False _lowerCAmelCase : Dict = False def A( self): __UpperCAmelCase : Any = TFViTModelTester(self) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=3_7) def A( self): self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''') def A( self): pass @unittest.skip(reason='''ViT does not use inputs_embeds''') def A( self): pass def A( self): __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class(_UpperCAmelCase) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) __UpperCAmelCase : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , tf.keras.layers.Layer)) def A( self): __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Any = model_class(_UpperCAmelCase) __UpperCAmelCase : List[str] = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : List[str] = [*signature.parameters.keys()] __UpperCAmelCase 
: Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , _UpperCAmelCase) def A( self): __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase) def A( self): __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase) @slow def A( self): __UpperCAmelCase : Union[str, Any] = TFViTModel.from_pretrained('''google/vit-base-patch16-224''') self.assertIsNotNone(_UpperCAmelCase) def __SCREAMING_SNAKE_CASE ( ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class lowerCamelCase ( unittest.TestCase ): @cached_property def A( self): return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None @slow def A( self): __UpperCAmelCase : List[str] = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''') __UpperCAmelCase : Tuple = self.default_image_processor __UpperCAmelCase : Tuple = prepare_img() __UpperCAmelCase : List[Any] = image_processor(images=_UpperCAmelCase , return_tensors='''tf''') # forward pass __UpperCAmelCase : int = model(**_UpperCAmelCase) # verify the logits __UpperCAmelCase : Tuple = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _UpperCAmelCase) __UpperCAmelCase : Any = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6]) tf.debugging.assert_near(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4)
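The slow integration test above condenses to the following standalone snippet; the model id and fixture path are taken verbatim from the test:

from PIL import Image
from transformers import TFViTForImageClassification, ViTImageProcessor

model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
outputs = model(**processor(images=image, return_tensors="tf"))
print(outputs.logits.shape)  # (1, 1000) -- ImageNet-1k logits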
462
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion''' ) _lowerCamelCase : List[str] = None _lowerCamelCase : Union[str, Any] = { '''7B''': 1_1_0_0_8, '''13B''': 1_3_8_2_4, '''30B''': 1_7_9_2_0, '''65B''': 2_2_0_1_6, '''70B''': 2_8_6_7_2, } _lowerCamelCase : Optional[Any] = { '''7B''': 1, '''7Bf''': 1, '''13B''': 2, '''13Bf''': 2, '''30B''': 4, '''65B''': 8, '''70B''': 8, '''70Bf''': 8, } def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1 , SCREAMING_SNAKE_CASE__ : str=2_56 ) -> int: '''simple docstring''' return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: return json.load(SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: '''simple docstring''' with open(SCREAMING_SNAKE_CASE__ , "w" ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str=True ) -> int: '''simple docstring''' os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , "tmp" ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = read_json(os.path.join(SCREAMING_SNAKE_CASE__ , "params.json" ) ) SCREAMING_SNAKE_CASE__ : int = NUM_SHARDS[model_size] SCREAMING_SNAKE_CASE__ : Union[str, Any] = params["n_layers"] SCREAMING_SNAKE_CASE__ : List[str] = params["n_heads"] SCREAMING_SNAKE_CASE__ : Optional[Any] = n_heads // num_shards SCREAMING_SNAKE_CASE__ : str = params["dim"] SCREAMING_SNAKE_CASE__ : List[str] = dim // n_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_0_0_0_0.0 SCREAMING_SNAKE_CASE__ : Tuple = 1.0 / (base ** (torch.arange(0 , SCREAMING_SNAKE_CASE__ , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: SCREAMING_SNAKE_CASE__ : int = params["n_kv_heads"] # for GQA / MQA SCREAMING_SNAKE_CASE__ : Optional[int] = n_heads_per_shard // num_key_value_heads SCREAMING_SNAKE_CASE__ : int = dim // num_key_value_heads else: # compatibility with other checkpoints SCREAMING_SNAKE_CASE__ : Dict = n_heads SCREAMING_SNAKE_CASE__ : str = n_heads_per_shard SCREAMING_SNAKE_CASE__ : Dict = dim # permute for sliced rotary def permute(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=n_heads , SCREAMING_SNAKE_CASE__ : List[str]=dim , SCREAMING_SNAKE_CASE__ : Dict=dim ): return w.view(SCREAMING_SNAKE_CASE__ , dima // n_heads // 2 , 2 , SCREAMING_SNAKE_CASE__ ).transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
SCREAMING_SNAKE_CASE__ : Dict = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , "consolidated.00.pth" ) , map_location="cpu" ) else: # Sharded SCREAMING_SNAKE_CASE__ : List[Any] = [ torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , f'''consolidated.{i:02d}.pth''' ) , map_location="cpu" ) for i in range(SCREAMING_SNAKE_CASE__ ) ] SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : List[str] = {"weight_map": {}} for layer_i in range(SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded SCREAMING_SNAKE_CASE__ : List[Any] = { f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wq.weight'''] ), f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[f'''layers.{layer_i}.attention.wk.weight'''] ), f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''], f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''], f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''], f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''], f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''], f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''], f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
SCREAMING_SNAKE_CASE__ : Any = { f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.attention_norm.weight''' ].clone(), f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ f'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } SCREAMING_SNAKE_CASE__ : int = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : Tuple = permute( torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : List[str] = torch.cat( [ loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ) ] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : int = torch.cat( [loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ) SCREAMING_SNAKE_CASE__ : List[str] = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ) SCREAMING_SNAKE_CASE__ : Tuple = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ) SCREAMING_SNAKE_CASE__ : int = torch.cat( [loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ) SCREAMING_SNAKE_CASE__ : List[str] = inv_freq for k, v in state_dict.items(): SCREAMING_SNAKE_CASE__ : str = filename param_count += v.numel() torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded SCREAMING_SNAKE_CASE__ : List[str] = { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } else: SCREAMING_SNAKE_CASE__ : Optional[Any] = { "model.norm.weight": loaded[0]["norm.weight"], "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ), } for k, v in state_dict.items(): SCREAMING_SNAKE_CASE__ : Optional[int] = filename param_count += v.numel() torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # Write configs SCREAMING_SNAKE_CASE__ : Optional[Any] = {"total_size": param_count * 2} write_json(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , "pytorch_model.bin.index.json" ) ) SCREAMING_SNAKE_CASE__ : List[str] = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 SCREAMING_SNAKE_CASE__ : Dict = params["multiple_of"] if "multiple_of" in params else 2_56 SCREAMING_SNAKE_CASE__ : Dict = LlamaConfig( hidden_size=SCREAMING_SNAKE_CASE__ , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=SCREAMING_SNAKE_CASE__ , ) config.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model." ) SCREAMING_SNAKE_CASE__ : int = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa , low_cpu_mem_usage=SCREAMING_SNAKE_CASE__ ) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format." ) model.save_pretrained(SCREAMING_SNAKE_CASE__ , safe_serialization=SCREAMING_SNAKE_CASE__ ) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_class(SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) def _a ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() parser.add_argument( "--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , ) parser.add_argument( "--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , ) parser.add_argument( "--output_dir" , help="Location to write HF model and tokenizer" , ) parser.add_argument("--safe_serialization" , type=SCREAMING_SNAKE_CASE__ , help="Whether or not to save using `safetensors`." ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(args.input_dir , "tokenizer.model" ) write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
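As a hypothetical follow-up, once the script has written the HF-format checkpoint it loads with the standard Transformers API. The output directory name below is an assumption (whatever was passed as `--output_dir`), not something the script fixes:

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

# Load the converted checkpoint in half precision to keep memory down.
model = LlamaForCausalLM.from_pretrained("./llama-7b-hf", torch_dtype=torch.float16)
tokenizer = LlamaTokenizer.from_pretrained("./llama-7b-hf")
print(model.config.num_hidden_layers, tokenizer.vocab_size)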
663
0
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
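The client above expects a peer on the same host and port that streams a file and then closes the connection. A minimal companion server sketch (not part of the original snippet, written here for completeness; the source file name is an assumption) could look like this:

import socket


def serve_file(path: str = "Sent_file", port: int = 12_312) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
        server.bind((socket.gethostname(), port))
        server.listen(1)
        conn, _addr = server.accept()
        with conn, open(path, "rb") as in_file:
            # Stream the file in 1 KiB chunks; the client stops when the
            # socket closes and recv() returns an empty bytes object.
            while chunk := in_file.read(1_024):
                conn.sendall(chunk)


if __name__ == "__main__":
    serve_file()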
590
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase (__lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = GPTaTokenizer UpperCAmelCase_ = GPTaTokenizerFast UpperCAmelCase_ = True UpperCAmelCase_ = {"add_prefix_space": True} UpperCAmelCase_ = False def A_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : Any = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] SCREAMING_SNAKE_CASE__ : int = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) ) SCREAMING_SNAKE_CASE__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] SCREAMING_SNAKE_CASE__ : Any = {"unk_token": "<unk>"} SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] ) SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file, "w", encoding="utf-8" ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + "\n" ) with open(self.merges_file, "w", encoding="utf-8" ) as fp: fp.write("\n".join(_UpperCAmelCase ) ) def A_ ( self : Tuple, **_UpperCAmelCase : str ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def A_ ( self : int, **_UpperCAmelCase : Union[str, Any] ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase ) def A_ ( self : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = "lower newer" SCREAMING_SNAKE_CASE__ : List[Any] = "lower newer" return input_text, output_text def A_ ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : Tuple = "lower newer" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase ) def A_ ( self : Dict ) -> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = "lower newer" # Testing tokenization SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing conversion to ids without special tokens 
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_UpperCAmelCase, add_prefix_space=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : Dict = tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), _UpperCAmelCase ) def A_ ( self : Tuple, *_UpperCAmelCase : List[Any], **_UpperCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def A_ ( self : Optional[Any], _UpperCAmelCase : int=1_5 ) -> List[str]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase ) # Simple input SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : List[str] = ["This is a simple input 1", "This is a simple input 2"] SCREAMING_SNAKE_CASE__ : Any = ("This is a simple input", "This is a pair") SCREAMING_SNAKE_CASE__ : List[Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Simple input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Simple input self.assertRaises( _UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", ) # Pair input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Pair input self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" ) # Pair input self.assertRaises( _UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", ) def A_ ( self : Tuple ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>" ) # Simple input SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : Dict = ["This is a simple input looooooooong", "This is a simple input"] SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair") SCREAMING_SNAKE_CASE__ : int = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple pair"), ] SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : Tuple = 
tokenizer(_UpperCAmelCase, padding="max_length", max_length=3_0, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(*_UpperCAmelCase, padding="max_length", max_length=6_0, return_tensors="np" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, truncate=_UpperCAmelCase, return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1], 3_0 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1], 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1], 6_0 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1], 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def A_ ( self : str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = "$$$" SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=_UpperCAmelCase, add_bos_token=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = "This is a simple input" SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"] SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0], _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0], _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def A_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass def A_ ( self : Dict ) -> str: """simple docstring""" # TODO: change to self.get_tokenizers() when the fast version is implemented SCREAMING_SNAKE_CASE__ : Any = [self.get_tokenizer(do_lower_case=_UpperCAmelCase, add_bos_token=_UpperCAmelCase )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE__ : List[Any] = "Encode this." SCREAMING_SNAKE_CASE__ : Optional[Any] = "This one too please." 
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) encoded_sequence += tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus( _UpperCAmelCase, _UpperCAmelCase, add_special_tokens=_UpperCAmelCase, return_special_tokens_mask=_UpperCAmelCase, ) SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["input_ids"] SCREAMING_SNAKE_CASE__ : Any = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase ) ] SCREAMING_SNAKE_CASE__ : List[Any] = [x for x in filtered_sequence if x is not None] self.assertEqual(_UpperCAmelCase, _UpperCAmelCase ) @require_tokenizers class lowerCamelCase (unittest.TestCase ): """simple docstring""" def A_ ( self : Optional[Any] ) -> int: """simple docstring""" # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = "A photo of a cat" SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("test_opt" ) SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("./test_opt" ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def A_ ( self : Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = "A photo of a cat" SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode( _UpperCAmelCase, ) # Same as above self.assertEqual(_UpperCAmelCase, [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip("This test is failing because of a bug in the fast tokenizer" ) def A_ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=_UpperCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = "bos" SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.get_vocab()["bos"] SCREAMING_SNAKE_CASE__ : Tuple = "A photo of a cat" SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode( _UpperCAmelCase, ) # We changed the bos token self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained("./tok" ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("./tok" ) self.assertTrue(tokenizer.is_fast ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode( _UpperCAmelCase, ) self.assertEqual(_UpperCAmelCase, [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
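For orientation, the `add_prefix_space` behaviour these tests pin down is easy to see with the public GPT-2 tokenizer (network access and the `gpt2` checkpoint assumed):

from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("gpt2")
print(tok.tokenize("lower newer"))                         # first token carries no leading "Ġ"
print(tok.tokenize("lower newer", add_prefix_space=True))  # first token gains the "Ġ" space marker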
663
0
def solution(n: int = 1000) -> int:
    """Return the largest product a * b * c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
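A quick numeric check of the closed form for b used above (obtained by eliminating c from a + b + c = n and a**2 + b**2 = c**2):

n, a = 1000, 200
b = (n * n - 2 * a * n) // (2 * n - 2 * a)
c = n - a - b
print(a, b, c)                                 # 200 375 425
print(a + b + c == n, a * a + b * b == c * c)  # True True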
344
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if every element of the list is equal (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # and append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers
    with n distinct prime factors each (Project Euler 47 for n == 4)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
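Two small sanity checks for the helpers above:

print(unique_prime_factors(644))  # {2, 7, 23}, since 644 = 2**2 * 7 * 23
print(upf_len(14), upf_len(15))   # 2 2 -> the first pair of consecutive integers
                                  # with two distinct prime factors each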
663
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : str = { '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimesformerModel''', '''TimesformerForVideoClassification''', '''TimesformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
633
import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class lowerCamelCase (unittest.TestCase ): """simple docstring""" @require_torch def A_ ( self : Any ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = pipeline( task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" ) SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" ) SCREAMING_SNAKE_CASE__ : int = dataset["train"]["audio"][-1]["array"] SCREAMING_SNAKE_CASE__ : Union[str, Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(_UpperCAmelCase ), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], ) @unittest.skip("No models are available in TF" ) def A_ ( self : str ) -> Dict: """simple docstring""" pass @slow @require_torch def A_ ( self : str ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = pipeline( task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", ) # This is an audio of a dog SCREAMING_SNAKE_CASE__ : List[str] = load_dataset("ashraq/esc50" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = dataset["train"]["audio"][-1]["array"] SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(_UpperCAmelCase ), [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ) SCREAMING_SNAKE_CASE__ : Any = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] ) self.assertEqual( nested_simplify(_UpperCAmelCase ), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) SCREAMING_SNAKE_CASE__ : Any = audio_classifier( [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 ) self.assertEqual( nested_simplify(_UpperCAmelCase ), [ [ {"score": 0.999, "label": "Sound of a dog"}, {"score": 0.001, "label": "Sound of vaccum cleaner"}, ], ] * 5, ) @unittest.skip("No models are available in TF" ) def A_ ( self : str ) -> List[str]: """simple docstring""" pass
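Outside the test harness, the same pipeline runs on any raw waveform array. A standalone sketch, where the model id and labels mirror the slow test and the silent clip is only a stand-in for the ESC-50 sample:

import numpy as np
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))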
663
0
def solution() -> int:
    """Return the product a * b * c of the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
664
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class UpperCamelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[Any]=18 , _lowerCamelCase : Union[str, Any]=30 , _lowerCamelCase : Tuple=4_00 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=True , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , ) -> Dict: __magic_name__ = size if size is not None else {"height": 18, "width": 18} __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = num_channels __magic_name__ = image_size __magic_name__ = min_resolution __magic_name__ = max_resolution __magic_name__ = do_resize __magic_name__ = size __magic_name__ = do_normalize __magic_name__ = image_mean __magic_name__ = image_std def __A ( self : int ) -> List[str]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase_ ( A , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = DPTImageProcessor if is_vision_available() else None def __A ( self : Dict ) -> Any: __magic_name__ = DPTImageProcessingTester(self ) @property def __A ( self : str ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : Tuple ) -> List[str]: __magic_name__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(_lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(_lowerCamelCase , "size" ) ) def __A ( self : List[str] ) -> List[Any]: __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def __A ( self : Union[str, Any] ) -> List[str]: # Initialize image_processing __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __A ( self : Dict ) -> Optional[Any]: # Initialize image_processing __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __A ( self : Optional[int] ) -> Dict: # Initialize image_processing __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
664
1
'''simple docstring''' import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( A , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = OpenAIGPTTokenizer UpperCAmelCase__ : str = OpenAIGPTTokenizerFast UpperCAmelCase__ : Dict = True UpperCAmelCase__ : Optional[int] = False def __A ( self : Dict ) -> Dict: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __magic_name__ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] __magic_name__ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) __magic_name__ = ["#version: 0.2", "l o", "lo w", "e r</w>", ""] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(_lowerCamelCase ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(_lowerCamelCase ) ) def __A ( self : int , _lowerCamelCase : Optional[int] ) -> Tuple: return "lower newer", "lower newer" def __A ( self : Tuple ) -> List[str]: __magic_name__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) __magic_name__ = "lower" __magic_name__ = ["low", "er</w>"] __magic_name__ = tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = tokens + ["<unk>"] __magic_name__ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def __A ( self : Dict , _lowerCamelCase : Tuple=15 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): __magic_name__ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) # Simple input __magic_name__ = "This is a simple input" __magic_name__ = ["This is a simple input 1", "This is a simple input 2"] __magic_name__ = ("This is a simple input", "This is a pair") __magic_name__ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises( _lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) # Pair input self.assertRaises(_lowerCamelCase , tokenizer_r.encode , _lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises(_lowerCamelCase , tokenizer_r.encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises( _lowerCamelCase , tokenizer_r.batch_encode_plus , _lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) def __A ( self : Optional[Any] ) -> List[str]: pass @require_ftfy @require_spacy @require_tokenizers 
class UpperCamelCase_ ( A ): """simple docstring""" pass
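The setUp above hand-builds a toy BPE vocab and merge table. As a rough illustration of what those merges do, here is a simplified sketch that applies the listed merges in order (not the tokenizer's actual ranked-merge loop):

def apply_merges(word, merges):
    # Split into characters, marking the end of the word like the toy vocab does.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        merged = []
        i = 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (a, b):
                merged.append(a + b)  # fuse the matching pair
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols


print(apply_merges("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]))  # ['low', 'er</w>']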
664
'''simple docstring'''
import numpy


class TwoHiddenLayerNeuralNetwork:
    """simple docstring"""

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input layer with the first hidden layer.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    '''simple docstring'''
    return (value) * (1 - (value))


def example() -> int:
    '''simple docstring'''
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
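A quick, hypothetical run of the class above (the names X, y and nn are illustrative; exact loss values depend on the random weight initialisation):

import numpy

numpy.random.seed(0)
X = numpy.array([[0, 0, 0], [1, 1, 1]], dtype=numpy.float64)
y = numpy.array([[0], [1]], dtype=numpy.float64)
nn = TwoHiddenLayerNeuralNetwork(input_array=X, output_array=y)
nn.train(output=y, iterations=5, give_loss=True)  # prints the mean squared error each iteration
print(nn.predict(numpy.array([1, 1, 1], dtype=numpy.float64)))  # 0 or 1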
664
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ : int ={ 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : str =[ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __magic_name__ : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
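The `_LazyModule` indirection above defers the heavy torch import until an attribute is first accessed. A rough stand-alone sketch of the same idea using a PEP 562 module-level `__getattr__` (the structure dict and module names here are made up, not the transformers internals):

import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    # Resolve the attribute lazily: import the owning module on first access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")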
664
'''simple docstring''' import torch from transformers import AutoModel class UpperCamelCase_ ( torch.nn.Module ): """simple docstring""" def __init__( self : Any , _lowerCamelCase : Optional[int]="sayef/fsner-bert-base-uncased" ) -> List[Any]: super(_lowerCamelCase , self ).__init__() __magic_name__ = AutoModel.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase ) __magic_name__ = torch.nn.CosineSimilarity(3 , 1e-08 ) __magic_name__ = torch.nn.Softmax(dim=1 ) def __A ( self : Tuple , **_lowerCamelCase : Union[str, Any] ) -> Optional[int]: return self.bert(**_lowerCamelCase ).last_hidden_state def __A ( self : Dict , _lowerCamelCase : Dict ) -> Dict: return token_embeddings.sum(2 , keepdim=_lowerCamelCase ) def __A ( self : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Tuple=1 ) -> Optional[Any]: return self.softmax(T * self.cos(_lowerCamelCase , _lowerCamelCase ) ) def __A ( self : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] ) -> List[str]: __magic_name__ = W_supports["sizes"].tolist() __magic_name__ = W_supports["start_token_id"].item() __magic_name__ = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] __magic_name__ = self.BERT(**_lowerCamelCase ) __magic_name__ = self.BERT(**_lowerCamelCase ) __magic_name__ = None __magic_name__ = None __magic_name__ = W_supports["input_ids"] == start_token_id __magic_name__ = W_supports["input_ids"] == end_token_id for i, size in enumerate(_lowerCamelCase ): if i == 0: __magic_name__ = 0 else: __magic_name__ = support_sizes[i - 1] __magic_name__ = S[s : s + size][start_token_masks[s : s + size]] __magic_name__ = S[s : s + size][end_token_masks[s : s + size]] __magic_name__ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) __magic_name__ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: __magic_name__ = torch.vstack((p_starts, p_start) ) __magic_name__ = torch.vstack((p_ends, p_end) ) else: __magic_name__ = p_start __magic_name__ = p_end return p_starts, p_ends
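An illustrative sketch of the scoring step above: temperature-scaled cosine similarity turned into a distribution. The temperature value and the tensor shapes are invented here for the demo:

import torch

cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-08)
softmax = torch.nn.Softmax(dim=1)
q = torch.randn(2, 5, 8)  # (batch, tokens, hidden)
s = torch.randn(2, 5, 8)
p = softmax(1.0 * cos(q, s))  # cosine gives (2, 5); softmax over the token axis
print(p.shape, p.sum(dim=1))  # each row sums to 1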
664
1
'''simple docstring''' import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class UpperCamelCase_ ( A ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = '''MCTCTFeatureExtractor''' UpperCAmelCase__ : Optional[int] = '''AutoTokenizer''' def __init__( self : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] ) -> Tuple: super().__init__(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = self.feature_extractor __magic_name__ = False def __call__( self : Dict , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : List[str] ) -> int: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowerCamelCase , **_lowerCamelCase ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) __magic_name__ = kwargs.pop("raw_speech" ) else: __magic_name__ = kwargs.pop("audio" , _lowerCamelCase ) __magic_name__ = kwargs.pop("sampling_rate" , _lowerCamelCase ) __magic_name__ = kwargs.pop("text" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: __magic_name__ = args[0] __magic_name__ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: __magic_name__ = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase ) if text is not None: __magic_name__ = self.tokenizer(_lowerCamelCase , **_lowerCamelCase ) if text is None: return inputs elif audio is None: return encodings else: __magic_name__ = encodings["input_ids"] return inputs def __A ( self : List[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ) -> Dict: return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def __A ( self : Union[str, Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : Dict ) -> Union[str, Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*_lowerCamelCase , **_lowerCamelCase ) __magic_name__ = kwargs.pop("input_features" , _lowerCamelCase ) __magic_name__ = kwargs.pop("labels" , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: __magic_name__ = args[0] __magic_name__ = args[1:] if input_features is not None: __magic_name__ = self.feature_extractor.pad(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) if labels is not None: __magic_name__ = self.tokenizer.pad(_lowerCamelCase , **_lowerCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: __magic_name__ = labels["input_ids"] return input_features def __A ( self : Optional[Any] , *_lowerCamelCase : str , **_lowerCamelCase : Any ) -> Dict: return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @contextmanager def __A ( self : List[str] ) -> Optional[Any]: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) __magic_name__ = True __magic_name__ = self.tokenizer yield __magic_name__ = self.feature_extractor __magic_name__ = False
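The `__call__` above is essentially a dispatcher over two sub-processors. A stripped-down sketch of that pattern (class and argument names are invented; it only assumes the two components are callables returning dicts):

class MiniProcessor:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer

    def __call__(self, audio=None, text=None):
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        inputs = self.feature_extractor(audio) if audio is not None else None
        encodings = self.tokenizer(text) if text is not None else None
        if encodings is None:
            return inputs
        if inputs is None:
            return encodings
        inputs["labels"] = encodings["input_ids"]  # text becomes the labels when both are given
        return inputs


# Toy components to exercise the three return paths:
processor = MiniProcessor(lambda a: {"input_features": a}, lambda t: {"input_ids": list(t)})
print(processor(audio=[0.1, 0.2], text="hi"))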
664
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
664
1
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
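For intuition, a tiny stand-alone tracker that mimics the progression the tests above walk through. This is a naive restart-on-mismatch sketch, not the implementation in transformers:

def make_tracker(sequences):
    state = {"seq": []}

    def update(token):
        candidate = state["seq"] + [token]
        # Step only if some allowed sequence starts with the candidate prefix.
        if any(s[: len(candidate)] == candidate for s in sequences):
            state["seq"] = candidate
            stepped = True
        else:
            state["seq"] = []
            stepped = False
        completed = state["seq"] in sequences
        return stepped, completed, state["seq"]

    return update


update = make_tracker([[1, 2, 3], [1, 2, 4]])
print(update(1), update(2), update(4))  # ends completed on [1, 2, 4]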
664
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __snake_case ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' __magic_name__ = AutoConfig.from_pretrained(lowerCamelCase_ ) __magic_name__ = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_ ) __magic_name__ = checkpoints.load_tax_checkpoint(lowerCamelCase_ ) __magic_name__ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"] if config.model_type == "t5": __magic_name__ = "SelfAttention" if config.model_type == "longt5" and config.encoder_attention_type == "local": __magic_name__ = "LocalSelfAttention" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ = "TransientGlobalSelfAttention" else: raise ValueError( "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`" " attribute with a value from ['local', 'transient-global]." ) # Encoder for layer_index in range(config.num_layers ): __magic_name__ = F'layers_{str(lowerCamelCase_ )}' # Self-Attention __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"] # Layer Normalization __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] __magic_name__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization __magic_name__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning __magic_name__ = flax_model.params["encoder"]["block"][str(lowerCamelCase_ )]["layer"] __magic_name__ = tax_attention_key __magic_name__ = tax_attention_out __magic_name__ = tax_attention_query __magic_name__ = tax_attention_value __magic_name__ = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ = tax_global_layer_norm if split_mlp_wi: __magic_name__ = tax_mlp_wi_a __magic_name__ = tax_mlp_wi_a else: __magic_name__ = tax_mlp_wi __magic_name__ = tax_mlp_wo __magic_name__ = tax_mlp_layer_norm __magic_name__ = flax_model_encoder_layer_block # Only for layer 0: __magic_name__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T __magic_name__ = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T __magic_name__ = tax_encoder_global_rel_embedding # Assigning __magic_name__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"] __magic_name__ = tax_encoder_norm 
# Decoder for layer_index in range(config.num_layers ): __magic_name__ = F'layers_{str(lowerCamelCase_ )}' # Self-Attention __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] __magic_name__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention __magic_name__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] __magic_name__ = tax_enc_dec_attention_module["key"]["kernel"] __magic_name__ = tax_enc_dec_attention_module["out"]["kernel"] __magic_name__ = tax_enc_dec_attention_module["query"]["kernel"] __magic_name__ = tax_enc_dec_attention_module["value"]["kernel"] # Layer Normalization __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] __magic_name__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization __magic_name__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning __magic_name__ = flax_model.params["decoder"]["block"][str(lowerCamelCase_ )]["layer"] __magic_name__ = tax_attention_key __magic_name__ = tax_attention_out __magic_name__ = tax_attention_query __magic_name__ = tax_attention_value __magic_name__ = tax_pre_attention_layer_norm __magic_name__ = tax_enc_dec_attention_key __magic_name__ = tax_enc_dec_attention_out __magic_name__ = tax_enc_dec_attention_query __magic_name__ = tax_enc_dec_attention_value __magic_name__ = tax_cross_layer_norm if split_mlp_wi: __magic_name__ = tax_mlp_wi_a __magic_name__ = tax_mlp_wi_a else: __magic_name__ = tax_mlp_wi __magic_name__ = tax_mlp_wo __magic_name__ = txa_mlp_layer_norm __magic_name__ = flax_model_decoder_layer_block # Decoder Normalization __magic_name__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"] __magic_name__ = txa_decoder_norm # Only for layer 0: __magic_name__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T __magic_name__ = tax_decoder_rel_embedding # Token Embeddings __magic_name__ = tax_model["target"]["token_embedder"]["embedding"] __magic_name__ = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: __magic_name__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(lowerCamelCase_ ) print("T5X Model was sucessfully converted!" ) if __name__ == "__main__": __magic_name__ : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' 
) __magic_name__ : Optional[int] =parser.parse_args() convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
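The conversion loop above leans on `check_and_map_params`-style shape-guarded copies. A hedged, generic sketch of that guard (the function and argument names here are illustrative):

import numpy as np
import torch
from torch import nn


def checked_copy(hf_param: nn.Parameter, t5x_array: np.ndarray) -> nn.Parameter:
    # Refuse to copy unless shapes agree, so a wrong weight mapping fails loudly.
    assert tuple(hf_param.shape) == t5x_array.shape, (
        f"shape mismatch: {tuple(hf_param.shape)} vs {t5x_array.shape}"
    )
    return nn.Parameter(torch.from_numpy(np.asarray(t5x_array)).float())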
664
1
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    '''simple docstring'''
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    '''simple docstring'''
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
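Two worked examples for the functions above:

print(main("8"))     # 0b1000
print(main("-100"))  # -0b1100100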
664
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class UpperCamelCase_ ( unittest.TestCase , A ): """simple docstring""" def __A ( self : Optional[int] ) -> Any: __magic_name__ = load_tool("text-to-speech" ) self.tool.setup() def __A ( self : Union[str, Any] ) -> int: # SpeechT5 isn't deterministic torch.manual_seed(0 ) __magic_name__ = self.tool("hey" ) __magic_name__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) ) def __A ( self : List[str] ) -> int: # SpeechT5 isn't deterministic torch.manual_seed(0 ) __magic_name__ = self.tool("hey" ) __magic_name__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
664
1
'''simple docstring'''
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
664
'''simple docstring''' import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm __magic_name__ : Dict =re.compile('[^A-Za-z_0-9]') # parameters used in DuplicationIndex __magic_name__ : int =10 __magic_name__ : Union[str, Any] =2_56 def __snake_case ( lowerCamelCase_ : List[str] ): '''simple docstring''' if len(lowerCamelCase_ ) < MIN_NUM_TOKENS: return None __magic_name__ = MinHash(num_perm=lowerCamelCase_ ) for token in set(lowerCamelCase_ ): min_hash.update(token.encode() ) return min_hash def __snake_case ( lowerCamelCase_ : str ): '''simple docstring''' return {t for t in NON_ALPHA.split(lowerCamelCase_ ) if len(t.strip() ) > 0} class UpperCamelCase_ : """simple docstring""" def __init__( self : int , *, _lowerCamelCase : float = 0.85 , ) -> Optional[Any]: __magic_name__ = duplication_jaccard_threshold __magic_name__ = NUM_PERM __magic_name__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) __magic_name__ = defaultdict(_lowerCamelCase ) def __A ( self : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : MinHash ) -> None: __magic_name__ = self._index.query(_lowerCamelCase ) if code_key in self._index.keys: print(f'Duplicate key {code_key}' ) return self._index.insert(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCamelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCamelCase ) def __A ( self : Union[str, Any] ) -> List[List[Dict]]: __magic_name__ = [] for base, duplicates in self._duplicate_clusters.items(): __magic_name__ = [base] + list(_lowerCamelCase ) # reformat the cluster to be a list of dict __magic_name__ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(_lowerCamelCase ) return duplicate_clusters def __A ( self : Tuple , _lowerCamelCase : Tuple ) -> None: __magic_name__ = self.get_duplicate_clusters() with open(_lowerCamelCase , "w" ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) def __snake_case ( lowerCamelCase_ : List[Any] ): '''simple docstring''' __magic_name__ , __magic_name__ = element __magic_name__ = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def __snake_case ( lowerCamelCase_ : Type[Dataset] ): '''simple docstring''' with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowerCamelCase_ , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def __snake_case ( lowerCamelCase_ : Type[Dataset] , lowerCamelCase_ : float ): '''simple docstring''' __magic_name__ = DuplicationIndex(duplication_jaccard_threshold=lowerCamelCase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase_ ) ) , max_queue_size=100 ) ): di.add(lowerCamelCase_ , lowerCamelCase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str ): '''simple docstring''' __magic_name__ = get_tokens(lowerCamelCase_ ) __magic_name__ = get_tokens(lowerCamelCase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) __magic_name__ : List[str] =None def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ): '''simple docstring''' __magic_name__ = [] for elementa in cluster: __magic_name__ = _shared_dataset[elementa["base_index"]]["content"] for elementa in extremes: __magic_name__ = _shared_dataset[elementa["base_index"]]["content"] if jaccard_similarity(lowerCamelCase_ , lowerCamelCase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: __magic_name__ = 1 extremes.append(lowerCamelCase_ ) return extremes def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' global _shared_dataset __magic_name__ = dataset __magic_name__ = [] __magic_name__ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCamelCase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowerCamelCase_ , lowerCamelCase_ , ) , total=len(lowerCamelCase_ ) , ): extremes_list.append(lowerCamelCase_ ) return extremes_list def __snake_case ( lowerCamelCase_ : Type[Dataset] , lowerCamelCase_ : float = 0.85 ): '''simple docstring''' __magic_name__ = make_duplicate_clusters(lowerCamelCase_ , lowerCamelCase_ ) __magic_name__ = {x["base_index"] for cluster in duplicate_clusters for x in cluster} __magic_name__ = {} __magic_name__ = find_extremes(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for extremes in extremes_clusters: for element in extremes: __magic_name__ = element __magic_name__ = duplicate_indices - set(extreme_dict.keys() ) __magic_name__ = dataset.filter(lambda lowerCamelCase_ , lowerCamelCase_ : idx not in remove_indices , with_indices=lowerCamelCase_ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: __magic_name__ = element["base_index"] in extreme_dict if element["is_extreme"]: __magic_name__ = extreme_dict[element["base_index"]]["copies"] print(F'Original dataset size: {len(lowerCamelCase_ )}' ) print(F'Number of duplicate clusters: {len(lowerCamelCase_ )}' ) print(F'Files in duplicate cluster: {len(lowerCamelCase_ )}' ) print(F'Unique files in duplicate cluster: {len(lowerCamelCase_ )}' ) print(F'Filtered dataset size: {len(lowerCamelCase_ )}' ) return ds_filter, duplicate_clusters
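A small sanity check of the MinHash estimate the index above is built on, using only the same `datasketch` calls that appear in the file (the token strings here are made up):

from datasketch import MinHash


def min_hash_of(tokens, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for t in set(tokens):
        m.update(t.encode())
    return m


a = min_hash_of("def foo ( x ) : return x".split())
b = min_hash_of("def foo ( y ) : return y".split())
print(a.jaccard(b))  # approximate Jaccard similarity of the two token sets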
664
1
'''simple docstring'''
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
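As a quick check of `solution` above: the first Fibonacci number with three digits is F(12) = 144, so:

print(solution(3))  # 12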
664
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('0.8.3'): raise Exception('requires gluonnlp == 0.8.3') if version.parse(mx.__version__) != version.parse('1.5.0'): raise Exception('requires mxnet == 1.5.0') logging.set_verbosity_info() __magic_name__ : Optional[int] =logging.get_logger(__name__) __magic_name__ : Tuple ='The Nymphenburg Palace is a beautiful palace in Munich!' def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str ): '''simple docstring''' __magic_name__ = { "attention_cell": "multi_head", "num_layers": 4, "units": 1024, "hidden_size": 768, "max_length": 512, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1024, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1e-5, "token_type_vocab_size": 2, } __magic_name__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __magic_name__ = BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCamelCase_ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __magic_name__ = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab __magic_name__ = os.path.join(get_home_dir() , "models" ) __magic_name__ = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ ) __magic_name__ = nlp.model.BERTModel( lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , ) original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ ) __magic_name__ = original_bort._collect_params_with_prefix() # Build our config 🤗 __magic_name__ = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": 
predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(lowerCamelCase_ ), } __magic_name__ = BertConfig.from_dict(lowerCamelCase_ ) __magic_name__ = BertForMaskedLM(lowerCamelCase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase_ : Any ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ): __magic_name__ = hf_param.shape __magic_name__ = to_torch(params[gluon_param] ) __magic_name__ = gluon_param.shape assert ( shape_hf == shape_gluon ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param __magic_name__ = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) __magic_name__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) __magic_name__ = check_and_map_params( 
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) __magic_name__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __magic_name__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __magic_name__ = hf_bort_model.bert.encoder.layer[i] # self attention __magic_name__ = layer.attention.self __magic_name__ = check_and_map_params( self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) __magic_name__ = check_and_map_params( self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) __magic_name__ = check_and_map_params( self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) __magic_name__ = check_and_map_params( self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) __magic_name__ = check_and_map_params( self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) __magic_name__ = check_and_map_params( self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output __magic_name__ = layer.attention.output __magic_name__ = check_and_map_params( self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' ) __magic_name__ = check_and_map_params( self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' ) __magic_name__ = check_and_map_params( self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' ) __magic_name__ = check_and_map_params( self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate __magic_name__ = layer.intermediate __magic_name__ = check_and_map_params( intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) __magic_name__ = check_and_map_params( intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output __magic_name__ = layer.output __magic_name__ = check_and_map_params( bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) __magic_name__ = check_and_map_params( bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) __magic_name__ = check_and_map_params( bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) __magic_name__ = check_and_map_params( bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __magic_name__ = RobertaTokenizer.from_pretrained("roberta-base" ) __magic_name__ = tokenizer.encode_plus(lowerCamelCase_ )["input_ids"] # Get gluon output __magic_name__ = mx.nd.array([input_ids] ) __magic_name__ = original_bort(inputs=lowerCamelCase_ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase_ ) __magic_name__ = BertModel.from_pretrained(lowerCamelCase_ ) hf_bort_model.eval() __magic_name__ = tokenizer.encode_plus(lowerCamelCase_ , return_tensors="pt" ) __magic_name__ = hf_bort_model(**lowerCamelCase_ )[0] __magic_name__ = output_gluon[0].asnumpy() __magic_name__ = output_hf[0].detach().numpy() __magic_name__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() __magic_name__ = np.allclose(lowerCamelCase_ , 
lowerCamelCase_ , atol=1e-3 ) if success: print("✔️ Both models output the same tensors" ) else: print("❌ Both models do **NOT** output the same tensors" ) print("Absolute difference is:" , lowerCamelCase_ ) if __name__ == "__main__": __magic_name__ : int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--bort_checkpoint_path', default=None, type=str, required=True, help='Path to the official Bort params file.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __magic_name__ : Optional[Any] =parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
664
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __magic_name__ : List[Any] ={ 'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : str =[ 'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'ResNetForImageClassification', 'ResNetModel', 'ResNetPreTrainedModel', 'ResNetBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : str =[ 'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFResNetForImageClassification', 'TFResNetModel', 'TFResNetPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : Optional[Any] =[ 'FlaxResNetForImageClassification', 'FlaxResNetModel', 'FlaxResNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __magic_name__ : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure)
664
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
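Worked examples for `binary_and` above (results are zero-padded to the longer operand's width):

print(binary_and(25, 32))  # 0b000000
print(binary_and(37, 50))  # 0b100000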
664
1
'''simple docstring'''
def euclidean_distance_sqr(point1, point2):
    '''simple docstring'''
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    '''simple docstring'''
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    '''simple docstring'''
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    '''simple docstring'''
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    '''simple docstring'''
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    '''simple docstring'''
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print('Distance:', closest_pair_of_points(points, len(points)))
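A brute-force cross-check of the divide-and-conquer result above, using only the standard library:

import math
from itertools import combinations


def brute_force_closest(points):
    return min(math.dist(p, q) for p, q in combinations(points, 2))


points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print(brute_force_closest(points))  # should equal closest_pair_of_points(points, len(points))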
664
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __magic_name__ : Tuple =threading.Lock() __magic_name__ : Optional[logging.Handler] =None __magic_name__ : List[str] ={ 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } __magic_name__ : str =logging.WARNING __magic_name__ : Any =True def __snake_case ( ): '''simple docstring''' __magic_name__ = os.getenv("TRANSFORMERS_VERBOSITY" , lowerCamelCase_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ' F'has to be one of: { ", ".join(log_levels.keys() ) }' ) return _default_log_level def __snake_case ( ): '''simple docstring''' return __name__.split("." )[0] def __snake_case ( ): '''simple docstring''' return logging.getLogger(_get_library_name() ) def __snake_case ( ): '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return __magic_name__ = logging.StreamHandler() # Set sys.stderr as stream. __magic_name__ = sys.stderr.flush # Apply our default configuration to the library root logger. __magic_name__ = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) __magic_name__ = False def __snake_case ( ): '''simple docstring''' global _default_handler with _lock: if not _default_handler: return __magic_name__ = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) __magic_name__ = None def __snake_case ( ): '''simple docstring''' return log_levels def __snake_case ( lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if name is None: __magic_name__ = _get_library_name() _configure_library_root_logger() return logging.getLogger(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def __snake_case ( lowerCamelCase_ : int ): '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def __snake_case ( lowerCamelCase_ : logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(lowerCamelCase_ ) def __snake_case ( lowerCamelCase_ : logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert 
handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() __magic_name__ = False def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() __magic_name__ = True def __snake_case ( ): '''simple docstring''' __magic_name__ = _get_library_root_logger().handlers for handler in handlers: __magic_name__ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) handler.setFormatter(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' __magic_name__ = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(lowerCamelCase_ ) def __snake_case ( self : Union[str, Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : Any ): '''simple docstring''' __magic_name__ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , lowerCamelCase_ ) if no_advisory_warnings: return self.warning(*lowerCamelCase_ , **lowerCamelCase_ ) __magic_name__ : int =warning_advice @functools.lru_cache(lowerCamelCase_ ) def __snake_case ( self : Dict , *lowerCamelCase_ : int , **lowerCamelCase_ : int ): '''simple docstring''' self.warning(*lowerCamelCase_ , **lowerCamelCase_ ) __magic_name__ : Optional[int] =warning_once class UpperCamelCase_ : """simple docstring""" def __init__( self : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ) -> Any: # pylint: disable=unused-argument __magic_name__ = args[0] if args else None def __iter__( self : int ) -> Tuple: return iter(self._iterator ) def __getattr__( self : List[Any] , _lowerCamelCase : int ) -> List[Any]: def empty_fn(*_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Optional[Any] ) -> Any: return self def __exit__( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ) -> Dict: return class UpperCamelCase_ : """simple docstring""" def __call__( self : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> List[Any]: if _tqdm_active: return tqdm_lib.tqdm(*_lowerCamelCase , **_lowerCamelCase ) else: return EmptyTqdm(*_lowerCamelCase , **_lowerCamelCase ) def __A ( self : Optional[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Dict ) -> Union[str, Any]: __magic_name__ = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_lowerCamelCase , **_lowerCamelCase ) def __A ( self : str ) -> Any: if _tqdm_active: return tqdm_lib.tqdm.get_lock() __magic_name__ : List[Any] =_tqdm_cls() def __snake_case ( ): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def __snake_case ( ): '''simple docstring''' global _tqdm_active __magic_name__ = True hf_hub_utils.enable_progress_bars() def __snake_case ( ): '''simple docstring''' global _tqdm_active __magic_name__ = False hf_hub_utils.disable_progress_bars()
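Typical use of the verbosity helpers defined above, as seen from library user code (assuming a standard `transformers` install that exposes this module as `transformers.utils.logging`):

from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
logger.info("INFO and above are now printed to stderr")
logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")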
664
1
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase_ : """simple docstring""" UpperCAmelCase__ : Optional[Union[str, Path]] = None UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : Optional[Dict] = None UpperCAmelCase__ : Optional[str] = None UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = False UpperCAmelCase__ : bool = True UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Optional[Union[str, bool]] = None UpperCAmelCase__ : bool = False UpperCAmelCase__ : Optional[Dict] = None UpperCAmelCase__ : Optional[str] = None def __A ( self : int ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(_lowerCamelCase ) for k, v in self.__dict__.items()} )
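The `copy` method above rebuilds the config from a field-by-field deepcopy. A minimal stand-alone sketch of the same trick on an invented dataclass:

import copy
from dataclasses import dataclass
from typing import Optional


@dataclass
class MiniConfig:
    cache_dir: Optional[str] = None
    force_download: bool = False

    def copy(self) -> "MiniConfig":
        # Rebuild the instance from a deepcopy of every field, as above.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


cfg = MiniConfig(cache_dir="/tmp/hf")
print(cfg.copy() == cfg)  # True: dataclass equality compares fields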
664
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ : Union[str, Any] ={'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : str =[ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __magic_name__ : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : Tuple , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : str , **_lowerCamelCase : Optional[Any] ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : str ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[str] = ['''torch'''] def __init__( self : Tuple , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[Any] ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : Dict , **_lowerCamelCase : str ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Any , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : str ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : List[str] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[Any] ) -> int: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : int , **_lowerCamelCase : Any ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[str] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> Tuple: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : List[Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Dict ) -> int: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Any ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ['''torch'''] def __init__( self : Tuple , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Optional[int] ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : List[str] , **_lowerCamelCase : int ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Tuple ) -> List[str]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Tuple = ['''torch'''] def __init__( self : int , *_lowerCamelCase : Dict , **_lowerCamelCase : Union[str, Any] ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Optional[int] ) -> str: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[str] , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict ) -> Tuple: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[str] = ['''torch'''] def __init__( self : Optional[int] , *_lowerCamelCase : str , **_lowerCamelCase : List[Any] ) -> List[Any]: 
requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Optional[Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ) -> str: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : str , **_lowerCamelCase : Dict ) -> Dict: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Tuple = ['''torch'''] def __init__( self : Dict , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Dict ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : str , **_lowerCamelCase : Dict ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Any ) -> Optional[int]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : str ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : str ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : str , **_lowerCamelCase : Union[str, Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[str] = ['''torch'''] def __init__( self : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : str ) -> str: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : int ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Dict = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Dict ) -> Dict: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Optional[int] , *_lowerCamelCase : List[str] , **_lowerCamelCase : int ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[Any] ) -> Any: requires_backends(cls , ["torch"] ) def __snake_case ( *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' requires_backends(lowerCamelCase_ , ["torch"] ) def __snake_case ( *lowerCamelCase_ : List[str] , **lowerCamelCase_ : List[str] ): '''simple docstring''' requires_backends(lowerCamelCase_ , ["torch"] ) def __snake_case ( *lowerCamelCase_ : Dict , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' requires_backends(lowerCamelCase_ , ["torch"] ) def __snake_case ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : Dict ): '''simple docstring''' requires_backends(lowerCamelCase_ , ["torch"] ) def __snake_case ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : List[str] ): '''simple docstring''' requires_backends(lowerCamelCase_ , ["torch"] ) def __snake_case ( *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' requires_backends(lowerCamelCase_ , ["torch"] ) def __snake_case ( *lowerCamelCase_ : int , **lowerCamelCase_ : List[str] ): '''simple docstring''' 
requires_backends(lowerCamelCase_ , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Tuple = ['''torch'''] def __init__( self : str , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Dict ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : str ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Optional[int] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Dict ) -> Optional[int]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : Optional[int] , *_lowerCamelCase : int , **_lowerCamelCase : str ) -> Any: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Any , **_lowerCamelCase : Optional[Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Optional[int] , *_lowerCamelCase : Tuple , **_lowerCamelCase : int ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : List[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : Optional[int] ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Any , **_lowerCamelCase : int ) -> str: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Optional[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : Any ) -> List[str]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[str] = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Optional[Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : str ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : str ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Tuple = ['''torch'''] def __init__( self : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : List[str] , **_lowerCamelCase : int ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ) -> str: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : str = ['''torch'''] def __init__( self : Dict , *_lowerCamelCase : Any , **_lowerCamelCase : Dict ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Any , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Optional[Any] ) -> Optional[int]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Any = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[int] ) -> Optional[int]: requires_backends(self , ["torch"] ) 
@classmethod def __A ( cls : int , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : List[Any] ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : List[Any] ) -> Optional[int]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : List[str] , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Dict ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Any ) -> int: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : Any , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Optional[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : List[Any] ) -> int: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Any , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Any ) -> str: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[Any] = ['''torch'''] def __init__( self : str , *_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ) -> Dict: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : str , **_lowerCamelCase : Dict ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Optional[int] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Union[str, Any] ) -> Tuple: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : str , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[int] ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : Union[str, Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Union[str, Any] ) -> Any: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ['''torch'''] def __init__( self : List[Any] , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Optional[Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Dict ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : Any , *_lowerCamelCase : int , **_lowerCamelCase : str ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : Union[str, Any] ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Any , *_lowerCamelCase : int , **_lowerCamelCase : Union[str, 
Any] ) -> str: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Any = ['''torch'''] def __init__( self : Any , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : int ) -> Dict: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Optional[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[Any] ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : int , **_lowerCamelCase : Dict ) -> str: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : str , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Union[str, Any] ) -> Optional[int]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : Optional[Any] ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : str , *_lowerCamelCase : int , **_lowerCamelCase : List[Any] ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Optional[int] ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Any , **_lowerCamelCase : Optional[Any] ) -> Optional[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Dict = ['''torch'''] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Any ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Tuple ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Any , *_lowerCamelCase : Tuple , **_lowerCamelCase : List[Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[Any] = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : Union[str, Any] ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[int] ) -> Optional[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : str ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : str = ['''torch'''] def __init__( self : Dict , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Tuple ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : Tuple ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : Any , **_lowerCamelCase : List[Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ['''torch'''] def __init__( self : Any , *_lowerCamelCase : Dict , **_lowerCamelCase : 
Optional[int] ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Optional[int] ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Tuple ) -> Dict: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : List[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Optional[Any] , *_lowerCamelCase : int , **_lowerCamelCase : Dict ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Any ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : Tuple , **_lowerCamelCase : Tuple ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : List[str] ) -> str: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Optional[int] , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ) -> Tuple: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Tuple = ['''torch'''] def __init__( self : Optional[int] , *_lowerCamelCase : Tuple , **_lowerCamelCase : Dict ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : str ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Tuple ) -> Tuple: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> int: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : int , **_lowerCamelCase : List[str] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[Any] ) -> Tuple: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = ['''torch'''] def __init__( self : int , *_lowerCamelCase : str , **_lowerCamelCase : str ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : str , **_lowerCamelCase : str ) -> Union[str, Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Union[str, Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[Any] = ['''torch'''] def __init__( self : Tuple , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[Any] ) -> Tuple: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : List[Any] ) -> int: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : List[Any] , 
**_lowerCamelCase : Optional[Any] ) -> Tuple: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[Any] = ['''torch'''] def __init__( self : Tuple , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict ) -> str: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : int , **_lowerCamelCase : str ) -> List[str]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Any: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : str = ['''torch'''] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[Any] ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : int , **_lowerCamelCase : str ) -> Tuple: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : str , **_lowerCamelCase : Union[str, Any] ) -> int: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : Any , **_lowerCamelCase : Tuple ) -> Any: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : List[Any] , **_lowerCamelCase : List[str] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[int] ) -> Optional[int]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[str] = ['''torch'''] def __init__( self : Dict , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : List[str] ) -> List[str]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[Any] = ['''torch'''] def __init__( self : List[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : str ) -> Optional[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Any ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : str ) -> Optional[int]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : int = ['''torch'''] def __init__( self : List[str] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Dict ) -> int: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Union[str, Any] ) -> Optional[int]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : List[Any] ) -> int: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : List[str] , *_lowerCamelCase : Any , **_lowerCamelCase : Tuple ) -> int: 
requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : int , **_lowerCamelCase : Optional[Any] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> str: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ['''torch'''] def __init__( self : List[str] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : List[Any] ) -> Dict: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[str] , *_lowerCamelCase : Any , **_lowerCamelCase : Any ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : Any ) -> Dict: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : str = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Any ) -> Any: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : List[str] , **_lowerCamelCase : Any ) -> int: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Dict , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Optional[Any] ) -> Any: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Any = ['''torch'''] def __init__( self : int , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[int] ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : List[Any] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : str ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Tuple , *_lowerCamelCase : List[str] , **_lowerCamelCase : str ) -> List[str]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ['''torch'''] def __init__( self : Union[str, Any] , *_lowerCamelCase : int , **_lowerCamelCase : Optional[Any] ) -> List[Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : int , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Union[str, Any] ) -> Dict: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : Union[str, Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Dict ) -> str: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : Optional[int] = ['''torch'''] def __init__( self : List[Any] , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Union[str, Any]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Any , **_lowerCamelCase : Optional[int] ) -> List[Any]: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[str] , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[int] ) -> List[Any]: requires_backends(cls , ["torch"] ) class UpperCamelCase_ ( metaclass=A ): """simple docstring""" UpperCAmelCase__ : List[Any] = ['''torch'''] def __init__( self : Optional[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[int] ) -> List[str]: requires_backends(self , ["torch"] ) @classmethod def __A ( cls : str , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Optional[int] ) -> Any: requires_backends(cls , ["torch"] ) @classmethod def __A ( cls : List[str] , *_lowerCamelCase : Union[str, Any] , 
**_lowerCamelCase : Any ) -> Dict: requires_backends(cls , ["torch"] )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
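# A hedged sketch of how the criteria exercised by the tests above are used
# with `generate()`; "gpt2" is a placeholder checkpoint.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

# Stop at 20 total tokens or after 2 seconds, whichever triggers first.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)])
output_ids = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))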
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    '''Convert a torch image batch in [-1, 1] to a list of PIL images.'''
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    '''Convert a numpy image batch in [0, 1] to a list of PIL images.'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
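# A small sanity-check sketch for the converters above: a random batch of two
# diffusion-style images in [-1, 1] becomes a list of two RGB PIL images.
import torch

fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1
pil_images = pt_to_pil(fake_batch)
print(len(pil_images), pil_images[0].size, pil_images[0].mode)  # 2 (64, 64) RGB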
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __magic_name__ : Optional[Any] =logging.get_logger(__name__) @add_end_docstrings( A , r''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class UpperCamelCase_ ( A ): """simple docstring""" def __A ( self : Any , _lowerCamelCase : GenericTensor ) -> np.ndarray: if self.framework == "tf": __magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": __magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ) else: raise ValueError("Unsupported framework" ) return masked_index def __A ( self : str , _lowerCamelCase : GenericTensor ) -> np.ndarray: __magic_name__ = self.get_masked_index(_lowerCamelCase ) __magic_name__ = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , ) def __A ( self : int , _lowerCamelCase : GenericTensor ) -> Any: if isinstance(_lowerCamelCase , _lowerCamelCase ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(_lowerCamelCase ) def __A ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] ) -> Dict[str, GenericTensor]: if return_tensors is None: __magic_name__ = self.framework __magic_name__ = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase ) self.ensure_exactly_one_mask_token(_lowerCamelCase ) return model_inputs def __A ( self : List[str] , _lowerCamelCase : int ) -> List[Any]: __magic_name__ = self.model(**_lowerCamelCase ) __magic_name__ = model_inputs["input_ids"] return model_outputs def __A ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Dict=None ) -> Dict: # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: __magic_name__ = target_ids.shape[0] __magic_name__ = model_outputs["input_ids"][0] __magic_name__ = model_outputs["logits"] if self.framework == "tf": __magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] __magic_name__ = outputs.numpy() __magic_name__ = outputs[0, masked_index, :] __magic_name__ = stable_softmax(_lowerCamelCase , axis=-1 ) if target_ids is not None: __magic_name__ = tf.gather_nd(tf.squeeze(_lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) ) __magic_name__ = tf.expand_dims(_lowerCamelCase , 0 ) __magic_name__ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase ) __magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy() else: __magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample 
__magic_name__ = outputs[0, masked_index, :] __magic_name__ = logits.softmax(dim=-1 ) if target_ids is not None: __magic_name__ = probs[..., target_ids] __magic_name__ , __magic_name__ = probs.topk(_lowerCamelCase ) __magic_name__ = [] __magic_name__ = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): __magic_name__ = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place __magic_name__ = input_ids.numpy().copy() if target_ids is not None: __magic_name__ = target_ids[p].tolist() __magic_name__ = p # Filter padding out: __magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back __magic_name__ = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) __magic_name__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(_lowerCamelCase ) result.append(_lowerCamelCase ) if single_mask: return result[0] return result def __A ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ) -> List[str]: if isinstance(_lowerCamelCase , _lowerCamelCase ): __magic_name__ = [targets] try: __magic_name__ = self.tokenizer.get_vocab() except Exception: __magic_name__ = {} __magic_name__ = [] for target in targets: __magic_name__ = vocab.get(_lowerCamelCase , _lowerCamelCase ) if id_ is None: __magic_name__ = self.tokenizer( _lowerCamelCase , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , max_length=1 , truncation=_lowerCamelCase , )["input_ids"] if len(_lowerCamelCase ) == 0: logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' "We cannot replace it with anything meaningful, ignoring it" ) continue __magic_name__ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' ) target_ids.append(id_ ) __magic_name__ = list(set(_lowerCamelCase ) ) if len(_lowerCamelCase ) == 0: raise ValueError("At least one target must be provided when passed." ) __magic_name__ = np.array(_lowerCamelCase ) return target_ids def __A ( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : int=None ) -> Tuple: __magic_name__ = {} if targets is not None: __magic_name__ = self.get_target_ids(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = target_ids if top_k is not None: __magic_name__ = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__( self : int , _lowerCamelCase : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]: __magic_name__ = super().__call__(_lowerCamelCase , **_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1: return outputs[0] return outputs
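# A hedged usage sketch for the pipeline above via the public API; the
# checkpoint name is a placeholder and requires a model download.
from transformers import pipeline

fill = pipeline("fill-mask", model="distilroberta-base")
print(fill("The capital of France is <mask>.", top_k=3))

# `targets` restricts scoring to candidate tokens, as implemented in
# `get_target_ids` above; out-of-vocab targets fall back to their first token.
print(fill("The capital of France is <mask>.", targets=[" Paris", " Lyon"]))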
'''simple docstring'''
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    '''Return num! recursively, memoized with lru_cache.'''
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
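# A quick demonstration of the memoization above: thanks to `lru_cache`,
# repeated calls reuse previously computed subresults.
print(factorial(10))           # 3628800
print(factorial(12))           # 479001600; reuses the cached factorial(10)
print(factorial.cache_info())  # hit/miss statistics reported by functools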
'''simple docstring'''
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    '''Maximum sum of any k consecutive elements, via a sliding window.'''
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
'''simple docstring''' import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ): '''simple docstring''' assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def __snake_case ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): '''simple docstring''' __magic_name__ = tmp_path / "cache" __magic_name__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = SqlDatasetReader( "dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ ).read() _check_sql_dataset(lowerCamelCase_ , lowerCamelCase_ ) @require_sqlalchemy @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def __snake_case ( lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ): '''simple docstring''' __magic_name__ = tmp_path / "cache" __magic_name__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(lowerCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ ).read() _check_sql_dataset(lowerCamelCase_ , lowerCamelCase_ ) def __snake_case ( lowerCamelCase_ : Optional[int] ): '''simple docstring''' with contextlib.closing(sqlitea.connect(lowerCamelCase_ ) ) as con: __magic_name__ = con.cursor() cur.execute("SELECT * FROM dataset" ) for row in cur: yield row @require_sqlalchemy def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Dict ): '''simple docstring''' __magic_name__ = tmp_path / "cache" __magic_name__ = os.path.join(lowerCamelCase_ , "tmp.sql" ) __magic_name__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase_ ).read() SqlDatasetWriter(lowerCamelCase_ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write() __magic_name__ = iter_sql_file(lowerCamelCase_ ) __magic_name__ = iter_sql_file(lowerCamelCase_ ) for rowa, rowa in zip(lowerCamelCase_ , lowerCamelCase_ ): assert rowa == rowa @require_sqlalchemy def __snake_case ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] ): '''simple docstring''' __magic_name__ = tmp_path / "cache" __magic_name__ = os.path.join(lowerCamelCase_ , "tmp.sql" ) __magic_name__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase_ ).read() 
SqlDatasetWriter(lowerCamelCase_ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write() __magic_name__ = iter_sql_file(lowerCamelCase_ ) __magic_name__ = iter_sql_file(lowerCamelCase_ ) for rowa, rowa in zip(lowerCamelCase_ , lowerCamelCase_ ): assert rowa == rowa @require_sqlalchemy def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' __magic_name__ = tmp_path / "cache" __magic_name__ = os.path.join(lowerCamelCase_ , "tmp.sql" ) __magic_name__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowerCamelCase_ ).read() with pytest.raises(lowerCamelCase_ ): SqlDatasetWriter(lowerCamelCase_ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
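# A hedged end-to-end sketch of the reader/writer exercised above, using the
# public wrappers (requires `sqlalchemy`); the database path is illustrative.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_sql("dataset", "sqlite:///example.db")                         # SqlDatasetWriter under the hood
round_tripped = Dataset.from_sql("dataset", "sqlite:///example.db")  # SqlDatasetReader under the hood
assert round_tripped.column_names == ["col_1", "col_2", "col_3"]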
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
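# A short sketch of the validation path above; the values are illustrative.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.num_key_value_heads)    # 32: defaults to num_attention_heads

try:
    LlamaConfig(rope_scaling={"type": "other", "factor": 2.0})
except ValueError as err:
    print(err)                       # rejected by _rope_scaling_validation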
'''simple docstring''' import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets __magic_name__ : Optional[int] ='\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' __magic_name__ : str ='\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n' __magic_name__ : Optional[int] ='\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... 
["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase_ ( datasets.Metric ): """simple docstring""" def __A ( self : List[Any] ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def __A ( self : str , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , ) -> Union[str, Any]: __magic_name__ = len(references[0] ) if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) __magic_name__ = [[refs[i] for refs in references] for i in range(_lowerCamelCase )] __magic_name__ = TER( normalized=_lowerCamelCase , no_punct=_lowerCamelCase , asian_support=_lowerCamelCase , case_sensitive=_lowerCamelCase , ) __magic_name__ = sb_ter.corpus_score(_lowerCamelCase , _lowerCamelCase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
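# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original metric file): exercising the
# same TER computation with sacrebleu directly. The example strings come from
# Example 2 in the docstring above; the transposition mirrors what the compute
# method does internally, turning per-prediction reference lists into
# sacrebleu's reference streams.
from sacrebleu import TER

_preds = ["does this sentence match??", "what about this sentence?"]
_refs = [
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]
# One stream per reference position: sacrebleu wants streams, not per-prediction lists.
_streams = [[refs[i] for refs in _refs] for i in range(len(_refs[0]))]
print(TER(case_sensitive=True).corpus_score(_preds, _streams))  # TER = 62.50, as in Example 2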
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    # Ideal gas law: PV = nRT  =>  P = nRT / V
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    # Ideal gas law: PV = nRT  =>  V = nRT / P
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
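# Illustrative worked example (a sketch; the numbers are arbitrary): for 1 mol
# of an ideal gas at 300 K in a 1 m^3 vessel, PV = nRT gives
#   P = nRT / V = 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa,
# and feeding that pressure back in recovers the original volume.
if __name__ == "__main__":
    p = pressure_of_gas_system(1.0, 300.0, 1.0)
    print(f"pressure ≈ {p:.2f} Pa")  # ≈ 2494.34
    print(f"volume ≈ {volume_of_gas_system(1.0, 300.0, p):.2f} m^3")  # ≈ 1.00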
'''simple docstring''' import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectrona, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class UpperCamelCase_ : """simple docstring""" @staticmethod def __A ( *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Optional[Any] ) -> int: pass def __snake_case ( lowerCamelCase_ : List[str] ): '''simple docstring''' return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. __magic_name__ : Union[str, Any] =( 'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png' ) @is_pipeline_test @require_torch @require_vision class UpperCamelCase_ ( unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Dict = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def __A ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ) -> str: __magic_name__ = pipeline( "document-question-answering" , model=_lowerCamelCase , tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) __magic_name__ = INVOICE_URL __magic_name__ = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , "" ) ) ) __magic_name__ = "What is the placebo?" __magic_name__ = [ { "image": load_image(_lowerCamelCase ), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def __A ( self : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Any ) -> Optional[int]: __magic_name__ = dqa_pipeline(_lowerCamelCase , top_k=2 ) self.assertEqual( _lowerCamelCase , [ [ {"score": ANY(_lowerCamelCase ), "answer": ANY(_lowerCamelCase ), "start": ANY(_lowerCamelCase ), "end": ANY(_lowerCamelCase )}, {"score": ANY(_lowerCamelCase ), "answer": ANY(_lowerCamelCase ), "start": ANY(_lowerCamelCase ), "end": ANY(_lowerCamelCase )}, ] ] * 3 , ) @require_torch @require_detectrona @require_pytesseract def __A ( self : List[str] ) -> List[Any]: __magic_name__ = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" ) __magic_name__ = INVOICE_URL __magic_name__ = "How many cats are there?" __magic_name__ = [ {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] __magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , _lowerCamelCase ) __magic_name__ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , _lowerCamelCase ) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# The answer should probably be empty. __magic_name__ = "./tests/fixtures/tests_samples/COCO/000000039769.png" __magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 ) self.assertEqual(_lowerCamelCase , [] ) # We can optionally pass the words and bounding boxes directly __magic_name__ = "./tests/fixtures/tests_samples/COCO/000000039769.png" __magic_name__ = [] __magic_name__ = [] __magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , words=_lowerCamelCase , boxes=_lowerCamelCase , top_k=2 ) self.assertEqual(_lowerCamelCase , [] ) @slow @require_torch @require_detectrona @require_pytesseract def __A ( self : Optional[int] ) -> Dict: __magic_name__ = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , ) __magic_name__ = INVOICE_URL __magic_name__ = "What is the invoice number?" __magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) __magic_name__ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ] , ) __magic_name__ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2 , ) @slow @require_torch @require_detectrona @require_pytesseract def __A ( self : List[Any] ) -> int: __magic_name__ = pipeline( "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , ) __magic_name__ = INVOICE_URL __magic_name__ = "What is the invoice number?" __magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) __magic_name__ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] , ) __magic_name__ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) @slow @require_torch @require_pytesseract @require_vision def __A ( self : Tuple ) -> List[Any]: __magic_name__ = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=_lowerCamelCase ) __magic_name__ = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=_lowerCamelCase , revision="3dc6de3" , ) __magic_name__ = INVOICE_URL __magic_name__ = "What is the invoice number?"
__magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) __magic_name__ = dqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) __magic_name__ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2 , ) __magic_name__ = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None __magic_name__ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23}, ] , ) @slow @require_torch @require_pytesseract @require_vision def __A ( self : Dict ) -> Optional[Any]: __magic_name__ = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=_lowerCamelCase ) __magic_name__ = pipeline( "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=_lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , ) __magic_name__ = INVOICE_URL __magic_name__ = "What is the invoice number?" __magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) __magic_name__ = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2 , ) __magic_name__ = list(zip(*apply_tesseract(load_image(_lowerCamelCase ) , _lowerCamelCase , "" ) ) ) # This model should also work if `image` is set to None __magic_name__ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=4 ) , [ {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16}, ] , ) @slow @require_torch def __A ( self : str ) -> Tuple: __magic_name__ = pipeline( "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , ) __magic_name__ = INVOICE_URL __magic_name__ = "What is the invoice number?" 
__magic_name__ = dqa_pipeline(image=_lowerCamelCase , question=_lowerCamelCase , top_k=2 ) self.assertEqual(nested_simplify(_lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] ) @require_tf @unittest.skip("Document question answering not implemented in TF" ) def __A ( self : Optional[int] ) -> str: pass
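# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): the end-user call pattern
# the tests above exercise. The model id and image path are examples only;
# pytesseract must be installed so the pipeline can OCR a raw image.
from transformers import pipeline

def _demo_document_qa(image_path: str = "invoice.png") -> None:
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    # top_k=2 returns the two best answer spans with scores and token offsets.
    print(dqa(image=image_path, question="What is the invoice number?", top_k=2))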
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask __magic_name__ : List[Any] =logging.getLogger(__name__) class UpperCamelCase_ ( A ): """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : str=-1 ) -> List[str]: # in NER datasets, the last column is usually reserved for NER label __magic_name__ = label_idx def __A ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]: if isinstance(_lowerCamelCase , _lowerCamelCase ): __magic_name__ = mode.value __magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' ) __magic_name__ = 1 __magic_name__ = [] with open(_lowerCamelCase , encoding="utf-8" ) as f: __magic_name__ = [] __magic_name__ = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) ) guid_index += 1 __magic_name__ = [] __magic_name__ = [] else: __magic_name__ = line.split(" " ) words.append(splits[0] ) if len(_lowerCamelCase ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) ) return examples def __A ( self : Optional[Any] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Union[str, Any]: __magic_name__ = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(_lowerCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: __magic_name__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(_lowerCamelCase ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." 
, line.split()[0] ) def __A ( self : Tuple , _lowerCamelCase : str ) -> List[str]: if path: with open(_lowerCamelCase , "r" ) as f: __magic_name__ = f.read().splitlines() if "O" not in labels: __magic_name__ = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCamelCase_ ( A ): """simple docstring""" def __init__( self : int ) -> str: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __A ( self : int , _lowerCamelCase : str ) -> List[str]: if path: with open(_lowerCamelCase , "r" ) as f: __magic_name__ = f.read().splitlines() if "O" not in labels: __magic_name__ = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCamelCase_ ( A ): """simple docstring""" def __A ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]: if isinstance(_lowerCamelCase , _lowerCamelCase ): __magic_name__ = mode.value __magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' ) __magic_name__ = 1 __magic_name__ = [] with open(_lowerCamelCase , encoding="utf-8" ) as f: for sentence in parse_incr(_lowerCamelCase ): __magic_name__ = [] __magic_name__ = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) ) guid_index += 1 return examples def __A ( self : Optional[int] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Any: __magic_name__ = 0 for sentence in parse_incr(_lowerCamelCase ): __magic_name__ = preds_list[example_id] __magic_name__ = "" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(_lowerCamelCase ) example_id += 1 def __A ( self : Dict , _lowerCamelCase : str ) -> List[str]: if path: with open(_lowerCamelCase , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
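# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): feeding a tiny CoNLL-style
# file to the NER reader. The class and method names used here (`NER`,
# `read_examples_from_file`) are the upstream `transformers` example names and
# are assumptions relative to the obfuscated listing above.
import os
import tempfile

def _demo_read_conll() -> None:
    with tempfile.TemporaryDirectory() as data_dir:
        # `read_examples_from_file` expects a "<split>.txt" file inside the directory.
        with open(os.path.join(data_dir, "train.txt"), "w", encoding="utf-8") as f:
            f.write("John B-PER\nlives O\nin O\nParis B-LOC\n\n")
        examples = NER().read_examples_from_file(data_dir, Split.train)
        print(examples[0].words, examples[0].labels)  # ['John', 'lives', 'in', 'Paris'] ...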
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __magic_name__ : Any =16 __magic_name__ : Optional[int] =32 def __snake_case ( lowerCamelCase_ : Accelerator , lowerCamelCase_ : int = 16 ): '''simple docstring''' __magic_name__ = AutoTokenizer.from_pretrained("bert-base-cased" ) __magic_name__ = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCamelCase_ : str ): # max_length=None => use the model max length (it's actually the default) __magic_name__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __magic_name__ = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCamelCase_ : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. __magic_name__ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __magic_name__ = 16 elif accelerator.mixed_precision != "no": __magic_name__ = 8 else: __magic_name__ = None return tokenizer.pad( lowerCamelCase_ , padding="longest" , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__magic_name__ = DataLoader( tokenized_datasets["train"] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) __magic_name__ = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __magic_name__ : Dict =mocked_dataloaders # noqa: F811 def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : int ): '''simple docstring''' if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase_ ) == "1": __magic_name__ = 2 # Initialize accelerator __magic_name__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__ = config["lr"] __magic_name__ = int(config["num_epochs"] ) __magic_name__ = int(config["seed"] ) __magic_name__ = int(config["batch_size"] ) __magic_name__ = evaluate.load("glue" , "mrpc" ) # New Code # # We can now define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCamelCase_ ) def inner_training_loop(lowerCamelCase_ : str ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCamelCase_ ) # Instantiate the model (we build the model here so that the seed also controls new weights initialization) __magic_name__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __magic_name__ = model.to(accelerator.device ) # Instantiate optimizer __magic_name__ = AdamW(params=model.parameters() , lr=lowerCamelCase_ ) __magic_name__ , __magic_name__ = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ ) # Instantiate scheduler __magic_name__ = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Now we train the model for epoch in range(lowerCamelCase_ ): model.train() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device ) __magic_name__ = model(**lowerCamelCase_ ) __magic_name__ = outputs.loss accelerator.backward(lowerCamelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __magic_name__ = model(**lowerCamelCase_ ) __magic_name__ = outputs.logits.argmax(dim=-1 ) __magic_name__ , __magic_name__ = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=lowerCamelCase_ , references=lowerCamelCase_ , ) __magic_name__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowerCamelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __snake_case ( ): '''simple docstring''' __magic_name__ = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __magic_name__ = parser.parse_args() __magic_name__ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
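# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the script): `find_executable_batch_size`
# reduced to its core behavior. The decorated function is called with no
# arguments, receives the current batch size, and is retried with a halved
# value whenever it raises an out-of-memory style error.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def _probe(batch_size):
    # Stand-in for a training loop; pretend anything above 32 runs out of memory.
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")  # the message pattern accelerate retries on
    return batch_size

if __name__ == "__main__":
    print(_probe())  # settles on 32 after halving 128 -> 64 -> 32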
'''simple docstring''' from __future__ import annotations from typing import Any class UpperCamelCase_ : """simple docstring""" def __init__( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0 ) -> None: __magic_name__ , __magic_name__ = row, column __magic_name__ = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )] def __str__( self : Optional[Any] ) -> str: __magic_name__ = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier __magic_name__ = 0 for row_vector in self.array: for obj in row_vector: __magic_name__ = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) ) __magic_name__ = f'%{max_element_length}s' # Make string and return def single_line(_lowerCamelCase : list[float] ) -> str: nonlocal string_format_identifier __magic_name__ = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self : Optional[int] ) -> str: return str(self ) def __A ( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ) -> bool: if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Optional[int] , _lowerCamelCase : tuple[int, int] ) -> Any: assert self.validate_indicies(_lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self : Tuple , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : float ) -> None: assert self.validate_indicies(_lowerCamelCase ) __magic_name__ = value def __add__( self : Union[str, Any] , _lowerCamelCase : Matrix ) -> Matrix: assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __magic_name__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __magic_name__ = self[r, c] + another[r, c] return result def __neg__( self : int ) -> Matrix: __magic_name__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __magic_name__ = -self[r, c] return result def __sub__( self : Optional[int] , _lowerCamelCase : Matrix ) -> Matrix: return self + (-another) def __mul__( self : Optional[int] , _lowerCamelCase : int | float | Matrix ) -> Matrix: if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication __magic_name__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __magic_name__ = self[r, c] * another return result elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication assert self.column == another.row __magic_name__ = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __magic_name__ = f'Unsupported type given for another ({type(_lowerCamelCase )})' raise TypeError(_lowerCamelCase ) def __A ( self : Optional[int] ) -> Matrix: __magic_name__ = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __magic_name__ = self[r, c] return result def __A ( self : int , _lowerCamelCase : Matrix , _lowerCamelCase : Matrix ) -> Any: assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == self.column == u.row == v.row # 
u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __magic_name__ = v.transpose() __magic_name__ = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def __snake_case ( ): '''simple docstring''' __magic_name__ = Matrix(3 , 3 , 0 ) for i in range(3 ): __magic_name__ = 1 print(F'a^(-1) is {ainv}' ) # u, v __magic_name__ = Matrix(3 , 1 , 0 ) __magic_name__ , __magic_name__ , __magic_name__ = 1, 2, -3 __magic_name__ = Matrix(3 , 1 , 0 ) __magic_name__ , __magic_name__ , __magic_name__ = 4, -2, 5 print(F'u is {u}' ) print(F'v is {v}' ) print(F'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase_ , lowerCamelCase_ )}' ) def __snake_case ( ): '''simple docstring''' import doctest doctest.testmod() testa()
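# ---------------------------------------------------------------------------
# Illustrative cross-check (a sketch, not part of the file above): verifying
# the Sherman-Morrison identity with numpy,
#   (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u),
# which is exactly what `sherman_morrison` computes when called on A^-1.
import numpy as np

_A = np.eye(3)
_u = np.array([[1.0], [2.0], [-3.0]])
_v = np.array([[4.0], [-2.0], [5.0]])
_Ainv = np.linalg.inv(_A)
_lhs = np.linalg.inv(_A + _u @ _v.T)
_rhs = _Ainv - (_Ainv @ _u @ _v.T @ _Ainv) / (1.0 + float(_v.T @ _Ainv @ _u))
assert np.allclose(_lhs, _rhs)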
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : """simple docstring""" def __init__( self : Dict , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=13 , _lowerCamelCase : Dict=7 , _lowerCamelCase : str=True , _lowerCamelCase : str=True , _lowerCamelCase : Dict=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : Dict=99 , _lowerCamelCase : int=24 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=6 , _lowerCamelCase : Any=37 , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : str=5_12 , _lowerCamelCase : int=16 , _lowerCamelCase : str=2 , _lowerCamelCase : Tuple=0.02 , _lowerCamelCase : Tuple=3 , _lowerCamelCase : List[Any]=None , _lowerCamelCase : str=10_00 , ) -> Dict: __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = scope __magic_name__ = range_bbox def __A ( self : List[str] ) -> Tuple: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __magic_name__ = bbox[i, j, 3] __magic_name__ = bbox[i, j, 1] __magic_name__ = t if bbox[i, j, 2] < bbox[i, j, 0]: __magic_name__ = bbox[i, j, 2] __magic_name__ = bbox[i, j, 0] __magic_name__ = t __magic_name__ = None if self.use_input_mask: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __A ( self : Optional[Any] ) -> List[Any]: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __A ( self : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , ) -> Dict: __magic_name__ = LiltModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __magic_name__ = model(_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , bbox=_lowerCamelCase , token_type_ids=_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , bbox=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __A ( self : str , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , ) -> List[Any]: __magic_name__ = self.num_labels __magic_name__ = LiltForTokenClassification(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __magic_name__ = model( _lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Any , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , ) -> Any: __magic_name__ = LiltForQuestionAnswering(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() __magic_name__ = model( _lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : Union[str, Any] ) -> int: __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( A , A , A , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) UpperCAmelCase__ : Optional[Any] = ( { '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase__ : Optional[Any] = False UpperCAmelCase__ : Dict = False def __A ( self : str , _lowerCamelCase : Union[str, Any] , 
_lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] ) -> int: return True def __A ( self : List[Any] ) -> List[Any]: __magic_name__ = LiltModelTester(self ) __magic_name__ = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def __A ( self : Dict ) -> Optional[Any]: self.config_tester.run_common_tests() def __A ( self : str ) -> Dict: __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def __A ( self : List[str] ) -> Optional[int]: __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*_lowerCamelCase ) def __A ( self : List[str] ) -> str: __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) def __A ( self : Optional[Any] ) -> Optional[int]: __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) @slow def __A ( self : List[str] ) -> Optional[Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = LiltModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) @require_torch @slow class UpperCamelCase_ ( unittest.TestCase ): """simple docstring""" def __A ( self : Any ) -> str: __magic_name__ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(_lowerCamelCase ) __magic_name__ = torch.tensor([[1, 2]] , device=_lowerCamelCase ) __magic_name__ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_lowerCamelCase ) # forward pass with torch.no_grad(): __magic_name__ = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase ) __magic_name__ = torch.Size([1, 2, 7_68] ) __magic_name__ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=_lowerCamelCase , ) self.assertTrue(outputs.last_hidden_state.shape , _lowerCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _lowerCamelCase , atol=1e-3 ) )
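# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): the bare inference pattern
# the slow integration test above checks. LiLT consumes token ids plus one
# (x0, y0, x1, y1) bounding box per token; the values here mirror the test.
import torch
from transformers import LiltModel

def _demo_lilt() -> None:
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
    with torch.no_grad():
        out = model(input_ids=input_ids, bbox=bbox)
    print(out.last_hidden_state.shape)  # torch.Size([1, 2, 768])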
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __magic_name__ : List[Any] =logging.getLogger(__name__) __magic_name__ : int ='Hello world! cécé herlolip' __magic_name__ : List[Any] =namedtuple( 'BertAbsConfig', [ 'temp_dir', 'large', 'use_bert_emb', 'finetune_bert', 'encoder', 'share_emb', 'max_pos', 'enc_layers', 'enc_hidden_size', 'enc_heads', 'enc_ff_size', 'enc_dropout', 'dec_layers', 'dec_hidden_size', 'dec_heads', 'dec_ff_size', 'dec_dropout', ], ) def __snake_case ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ): '''simple docstring''' __magic_name__ = BertAbsConfig( temp_dir="." , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , ) __magic_name__ = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage ) __magic_name__ = AbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) , lowerCamelCase_ ) original.eval() __magic_name__ = BertAbsSummarizer(lowerCamelCase_ , torch.device("cpu" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("convert the model" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ----------------------------------- # Make sure the outputs are identical # ----------------------------------- logging.info("Make sure that the models' outputs are identical" ) __magic_name__ = BertTokenizer.from_pretrained("bert-base-uncased" ) # prepare the model inputs __magic_name__ = tokenizer.encode("This is sample éàalj'-." ) encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) ) __magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) __magic_name__ = tokenizer.encode("This is sample 3 éàalj'-." ) decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(lowerCamelCase_ )) ) __magic_name__ = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __magic_name__ = encoder_input_ids __magic_name__ = decoder_input_ids __magic_name__ = __magic_name__ = None __magic_name__ = None __magic_name__ = __magic_name__ = None __magic_name__ = __magic_name__ = None __magic_name__ = None # The original model does not apply the generator layer immediately but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical __magic_name__ = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] __magic_name__ = original.generator(lowerCamelCase_ ) __magic_name__ = new_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] __magic_name__ = new_model.generator(lowerCamelCase_ ) __magic_name__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("Maximum absolute difference between outputs: {:.2f}".format(lowerCamelCase_ ) ) __magic_name__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("Maximum absolute difference between outputs: {:.2f}".format(lowerCamelCase_ ) ) __magic_name__ = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) if are_identical: logging.info("all outputs are equal up to 1e-3" ) else: raise ValueError("the outputs are different. The new model is likely different from the original one." ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("saving the model's state dictionary" ) torch.save( new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" ) if __name__ == "__main__": __magic_name__ : Dict =argparse.ArgumentParser() parser.add_argument( '--bertabs_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.', ) __magic_name__ : Any =parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
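# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the script): the generic "same inputs, same
# outputs" check the conversion above relies on, usable for any pair of models
# that are meant to be numerically equivalent after a weight transfer.
import torch

def outputs_match(model_a, model_b, inputs, atol=1e-3):
    model_a.eval()
    model_b.eval()
    with torch.no_grad():
        out_a = model_a(**inputs)[0]
        out_b = model_b(**inputs)[0]
    print("max abs diff: {:.2e}".format(torch.max(torch.abs(out_a - out_b)).item()))
    return torch.allclose(out_a, out_b, atol=atol)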
'''simple docstring''' # XXX: we want transformers master here - in the absence of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __magic_name__ : Optional[Any] =Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) __magic_name__ : Any ={'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} __magic_name__ : List[Any] ='zero2' __magic_name__ : List[Any] ='zero3' __magic_name__ : Dict =[ZEROa, ZEROa] def __snake_case ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' __magic_name__ = parameterized.to_safe_name("_".join(str(lowerCamelCase_ ) for x in param.args ) ) return F'{func.__name__}_{param_based_name}' # Cartesian-product of zero stages with models to test __magic_name__ : Optional[int] =list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class UpperCamelCase_ ( A ): """simple docstring""" @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def __A ( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ) -> Any: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) @require_torch_multi_gpu @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def __A ( self : str , _lowerCamelCase : int , _lowerCamelCase : int ) -> Optional[int]: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def __A ( self : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ) -> List[Any]: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) @require_torch_multi_gpu @parameterized.expand(_lowerCamelCase , name_func=_lowerCamelCase ) def __A ( self : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ) -> Dict: self.run_and_check( stage=_lowerCamelCase , model=_lowerCamelCase , distributed=_lowerCamelCase , fpaa=_lowerCamelCase , ) def __A ( self : List[Any] , _lowerCamelCase : Union[str, Any] ) -> Any: # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def __A ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int = 10 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , ) -> Tuple: __magic_name__ = models[model] __magic_name__ = self.run_trainer( stage=_lowerCamelCase , model_name=_lowerCamelCase , eval_steps=_lowerCamelCase , num_train_epochs=1 , distributed=_lowerCamelCase , fpaa=_lowerCamelCase ,
) self.do_checks(_lowerCamelCase ) return output_dir def __A ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1 , _lowerCamelCase : bool = True , _lowerCamelCase : bool = True , ) -> Tuple: __magic_name__ = self.get_auto_remove_tmp_dir("./xxx" , after=_lowerCamelCase ) __magic_name__ = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(_lowerCamelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split() if fpaa: args.extend(["--fp16"] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __magic_name__ = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split() __magic_name__ = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'] __magic_name__ = self.get_launcher(_lowerCamelCase ) __magic_name__ = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_lowerCamelCase , env=self.get_env() ) return output_dir def __A ( self : Tuple , _lowerCamelCase : int=False ) -> Optional[Any]: # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with more gpus because we use very little data) __magic_name__ = min(2 , get_gpu_count() ) if distributed else 1 return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
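# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): the command line the test
# assembles, written out in one piece. Paths, the model name and the ZeRO
# stage are placeholders taken from the constants above; the trailing flags
# are elided.
#
#   deepspeed --num_nodes 1 --num_gpus 2 \
#       examples/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random \
#       --deepspeed tests/deepspeed/ds_config_wav2vec2_zero2.json \
#       --per_device_train_batch_size 2 --fp16 ...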
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    """simple docstring"""

    def __A ( self : List[str] ) -> str:
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __magic_name__ = [[1, 2, 4], [1, 2, 3, 4]]
        __magic_name__ = DisjunctiveConstraint(_lowerCamelCase )
        self.assertTrue(isinstance(dc.token_ids , _lowerCamelCase ) )

        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def __A ( self : List[Any] ) -> str:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __magic_name__ = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(_lowerCamelCase ):
            DisjunctiveConstraint(_lowerCamelCase )  # fails here

    def __A ( self : List[Any] ) -> int:
        __magic_name__ = [[1, 2, 3], [1, 2, 4]]
        __magic_name__ = DisjunctiveConstraint(_lowerCamelCase )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        __magic_name__ = stepped is True and completed is False and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        __magic_name__ = stepped is True and completed is False and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(3 )
        __magic_name__ = stepped is True and completed is True and reset is False
        self.assertTrue(_lowerCamelCase )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def __A ( self : Any ) -> Union[str, Any]:
        __magic_name__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __magic_name__ = DisjunctiveConstraint(_lowerCamelCase )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
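# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file): how a DisjunctiveConstraint
# is typically consumed -- handed to `generate` so that at least one of the
# phrase alternatives must appear in the output. The model and phrases are
# examples only; constrained decoding requires beam search (num_beams > 1).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

def _demo_disjunctive() -> None:
    tok = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    phrases = [
        tok("rained", add_special_tokens=False).input_ids,
        tok("raining", add_special_tokens=False).input_ids,
    ]
    constraint = DisjunctiveConstraint(phrases)
    inputs = tok("translate French to English: il pleut", return_tensors="pt")
    out = model.generate(**inputs, constraints=[constraint], num_beams=4)
    print(tok.batch_decode(out, skip_special_tokens=True))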
'''simple docstring'''
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int):
    '''simple docstring'''
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int):
    '''simple docstring'''
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int):
    '''simple docstring'''
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
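# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the file above): a 5x5 board admits
# an open knight's tour, so this prints a board whose entries 1..25 give the
# order in which the squares are visited; board sizes with no tour raise
# ValueError instead.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)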
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
664
1
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wrapper around a numpy array of token-id sequences: checks, filters and batches them."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunk of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
664
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
664
1
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume between the metric units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
664
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the input forward through both hidden layers to the output node."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update the weights by gradient descent using the error between real and predicted output."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predict the output for the given input values, thresholded at 0.6."""
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Apply the sigmoid activation function."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, evaluated on sigmoid output values."""
    return (value) * (1 - (value))


def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
664
1
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    """Wraps an auto image processor and an auto tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward everything to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
664
import torch

from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Compute, for each query token, the scores of being an entity start and end token."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # Embed the query and the support examples with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
664
1
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to save the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
664
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from"
    " diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import"
    " `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
664
1
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Take a whole-valued decimal number and return the equivalent hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal


if __name__ == "__main__":
    import doctest

    doctest.testmod()
664
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name][
            "global_relative_attention_bias"
        ]["embedding"] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
    parser.add_argument(
        "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
664
1
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
664
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
664
1
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
'''simple docstring''' import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm __magic_name__ : Dict =re.compile('[^A-Za-z_0-9]') # parameters used in DuplicationIndex __magic_name__ : int =10 __magic_name__ : Union[str, Any] =2_56 def __snake_case ( lowerCamelCase_ : List[str] ): '''simple docstring''' if len(lowerCamelCase_ ) < MIN_NUM_TOKENS: return None __magic_name__ = MinHash(num_perm=lowerCamelCase_ ) for token in set(lowerCamelCase_ ): min_hash.update(token.encode() ) return min_hash def __snake_case ( lowerCamelCase_ : str ): '''simple docstring''' return {t for t in NON_ALPHA.split(lowerCamelCase_ ) if len(t.strip() ) > 0} class UpperCamelCase_ : """simple docstring""" def __init__( self : int , *, _lowerCamelCase : float = 0.85 , ) -> Optional[Any]: __magic_name__ = duplication_jaccard_threshold __magic_name__ = NUM_PERM __magic_name__ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) __magic_name__ = defaultdict(_lowerCamelCase ) def __A ( self : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : MinHash ) -> None: __magic_name__ = self._index.query(_lowerCamelCase ) if code_key in self._index.keys: print(f'Duplicate key {code_key}' ) return self._index.insert(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(_lowerCamelCase ) break else: self._duplicate_clusters[close_duplicates[0]].add(_lowerCamelCase ) def __A ( self : Union[str, Any] ) -> List[List[Dict]]: __magic_name__ = [] for base, duplicates in self._duplicate_clusters.items(): __magic_name__ = [base] + list(_lowerCamelCase ) # reformat the cluster to be a list of dict __magic_name__ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(_lowerCamelCase ) return duplicate_clusters def __A ( self : Tuple , _lowerCamelCase : Tuple ) -> None: __magic_name__ = self.get_duplicate_clusters() with open(_lowerCamelCase , "w" ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) def __snake_case ( lowerCamelCase_ : List[Any] ): '''simple docstring''' __magic_name__ , __magic_name__ = element __magic_name__ = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def __snake_case ( lowerCamelCase_ : Type[Dataset] ): '''simple docstring''' with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(lowerCamelCase_ , max_queue_size=1_0000 ) , chunksize=100 , ): if data is not None: yield data def __snake_case ( lowerCamelCase_ : Type[Dataset] , lowerCamelCase_ : float ): '''simple docstring''' __magic_name__ = DuplicationIndex(duplication_jaccard_threshold=lowerCamelCase_ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase_ ) ) , max_queue_size=100 ) ): di.add(lowerCamelCase_ , lowerCamelCase_ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str ): '''simple docstring''' __magic_name__ = get_tokens(lowerCamelCase_ ) __magic_name__ = get_tokens(lowerCamelCase_ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) __magic_name__ : List[str] =None def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] ): '''simple docstring''' __magic_name__ = [] for elementa in cluster: __magic_name__ = _shared_dataset[elementa["base_index"]]["content"] for elementa in extremes: __magic_name__ = _shared_dataset[elementa["base_index"]]["content"] if jaccard_similarity(lowerCamelCase_ , lowerCamelCase_ ) >= jaccard_threshold: elementa["copies"] += 1 break else: __magic_name__ = 1 extremes.append(lowerCamelCase_ ) return extremes def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' global _shared_dataset __magic_name__ = dataset __magic_name__ = [] __magic_name__ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCamelCase_ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( lowerCamelCase_ , lowerCamelCase_ , ) , total=len(lowerCamelCase_ ) , ): extremes_list.append(lowerCamelCase_ ) return extremes_list def __snake_case ( lowerCamelCase_ : Type[Dataset] , lowerCamelCase_ : float = 0.85 ): '''simple docstring''' __magic_name__ = make_duplicate_clusters(lowerCamelCase_ , lowerCamelCase_ ) __magic_name__ = {x["base_index"] for cluster in duplicate_clusters for x in cluster} __magic_name__ = {} __magic_name__ = find_extremes(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for extremes in extremes_clusters: for element in extremes: __magic_name__ = element __magic_name__ = duplicate_indices - set(extreme_dict.keys() ) __magic_name__ = dataset.filter(lambda lowerCamelCase_ , lowerCamelCase_ : idx not in remove_indices , with_indices=lowerCamelCase_ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: __magic_name__ = element["base_index"] in extreme_dict if element["is_extreme"]: __magic_name__ = extreme_dict[element["base_index"]]["copies"] print(F'Original dataset size: {len(lowerCamelCase_ )}' ) print(F'Number of duplicate clusters: {len(lowerCamelCase_ )}' ) print(F'Files in duplicate cluster: {len(lowerCamelCase_ )}' ) print(F'Unique files in duplicate cluster: {len(lowerCamelCase_ )}' ) print(F'Filtered dataset size: {len(lowerCamelCase_ )}' ) return ds_filter, duplicate_clusters
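For orientation, here is a minimal standalone sketch of the MinHash + LSH lookup that the deduplication index above is built on. It uses the same `datasketch` primitives; the documents, keys, and threshold are illustrative.

from datasketch import MinHash, MinHashLSH

def min_hash(tokens, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for token in set(tokens):
        m.update(token.encode())
    return m

lsh = MinHashLSH(threshold=0.85, num_perm=256)
lsh.insert("original", min_hash("def add(x, y): return x + y".split()))

# A token-level permutation has an identical token set (Jaccard = 1.0),
# so the index reports it as a near-duplicate of "original".
candidate = min_hash("return x + y def add(x, y):".split())
print(lsh.query(candidate))  # ['original']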
'''simple docstring''' from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase_ : """simple docstring""" def __init__( self : Any , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=13 , _lowerCamelCase : List[Any]=30 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : int=3 , _lowerCamelCase : Tuple=True , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Tuple=32 , _lowerCamelCase : int=2 , _lowerCamelCase : Optional[Any]=4 , _lowerCamelCase : List[str]=37 , _lowerCamelCase : Tuple="gelu" , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : List[str]=0.02 , _lowerCamelCase : Tuple=3 , _lowerCamelCase : Optional[Any]=0.6 , _lowerCamelCase : str=None , ) -> Union[str, Any]: __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = is_training __magic_name__ = use_labels __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = mask_ratio __magic_name__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) __magic_name__ = (image_size // patch_size) ** 2 __magic_name__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __A ( self : Any ) -> str: __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = self.get_config() return config, pixel_values, labels def __A ( self : str ) -> Tuple: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __A ( self : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ) -> Union[str, Any]: 
__magic_name__ = TFViTMAEModel(config=_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , training=_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] ) -> int: __magic_name__ = TFViTMAEForPreTraining(_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , training=_lowerCamelCase ) # expected sequence length = num_patches __magic_name__ = (self.image_size // self.patch_size) ** 2 __magic_name__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images __magic_name__ = 1 __magic_name__ = TFViTMAEForPreTraining(_lowerCamelCase ) __magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __magic_name__ = model(_lowerCamelCase , training=_lowerCamelCase ) __magic_name__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def __A ( self : Dict ) -> int: __magic_name__ = self.prepare_config_and_inputs() ((__magic_name__) , (__magic_name__) , (__magic_name__)) = config_and_inputs __magic_name__ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( A , A , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCAmelCase__ : Union[str, Any] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} UpperCAmelCase__ : Any = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = False def __A ( self : Optional[Any] ) -> Union[str, Any]: __magic_name__ = TFViTMAEModelTester(self ) __magic_name__ = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase , hidden_size=37 ) def __A ( self : int ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def __A ( self : Tuple ) -> Union[str, Any]: pass def __A ( self : Dict ) -> Tuple: __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(_lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCamelCase , tf.keras.layers.Layer ) ) def __A ( self : Any ) -> Optional[Any]: __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(_lowerCamelCase ) __magic_name__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def __A ( self : int ) -> Tuple: __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def __A ( self : Optional[int] ) -> Dict: __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase ) def __A ( self : Dict ) -> List[str]: # make the mask reproducible np.random.seed(2 ) 
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = int((config.image_size // config.patch_size) ** 2 ) __magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __magic_name__ = model_class(_lowerCamelCase ) __magic_name__ = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , noise=_lowerCamelCase ) __magic_name__ = copy.deepcopy(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) __magic_name__ = model(**_lowerCamelCase , noise=_lowerCamelCase ) __magic_name__ = outputs_dict[0].numpy() __magic_name__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def __A ( self : Tuple ) -> Union[str, Any]: # make the mask reproducible np.random.seed(2 ) __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = int((config.image_size // config.patch_size) ** 2 ) __magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_lowerCamelCase : Optional[Any] ): __magic_name__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(_lowerCamelCase ): __magic_name__ = v.numpy() else: __magic_name__ = np.array(_lowerCamelCase ) return inputs_np_dict for model_class in self.all_model_classes: __magic_name__ = model_class(_lowerCamelCase ) __magic_name__ = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = prepare_numpy_arrays(_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , noise=_lowerCamelCase ) __magic_name__ = model(**_lowerCamelCase , noise=_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) def __A ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] ) -> Union[str, Any]: # make masks reproducible np.random.seed(2 ) __magic_name__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) __magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __magic_name__ = tf.constant(_lowerCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument __magic_name__ = tf_noise super().check_pt_tf_models(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def __A ( self : int ) -> Tuple: # make mask reproducible np.random.seed(2 ) __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_lowerCamelCase ) if module_member_name.endswith("MainLayer" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )] for module_member in (getattr(_lowerCamelCase , _lowerCamelCase ),) if isinstance(_lowerCamelCase , _lowerCamelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_lowerCamelCase , "_keras_serializable" , _lowerCamelCase ) } __magic_name__ = int((config.image_size // config.patch_size) ** 2 ) __magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) __magic_name__ = tf.convert_to_tensor(_lowerCamelCase ) inputs_dict.update({"noise": noise} ) for main_layer_class in tf_main_layer_classes: __magic_name__ = main_layer_class(_lowerCamelCase ) __magic_name__ = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } __magic_name__ = tf.keras.Model(_lowerCamelCase , outputs=main_layer(_lowerCamelCase ) ) __magic_name__ = model(_lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(_lowerCamelCase , "keras_model.h5" ) model.save(_lowerCamelCase ) __magic_name__ = tf.keras.models.load_model( _lowerCamelCase , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_lowerCamelCase , tf.keras.Model ) __magic_name__ = model(_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) @slow def __A ( self : List[Any] ) -> Dict: # make mask reproducible np.random.seed(2 ) __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = int((config.image_size // config.patch_size) ** 2 ) __magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __magic_name__ = model_class(_lowerCamelCase ) __magic_name__ = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , noise=_lowerCamelCase ) if model_class.__name__ == "TFViTMAEModel": __magic_name__ = outputs.last_hidden_state.numpy() __magic_name__ = 0 else: __magic_name__ = outputs.logits.numpy() __magic_name__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_lowerCamelCase , saved_model=_lowerCamelCase ) __magic_name__ = model_class.from_pretrained(_lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , noise=_lowerCamelCase ) if model_class.__name__ == "TFViTMAEModel": __magic_name__ = after_outputs["last_hidden_state"].numpy() __magic_name__ = 0 else: __magic_name__ = after_outputs["logits"].numpy() __magic_name__ = 0 __magic_name__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_lowerCamelCase , 1e-5 ) def __A ( self : List[Any] ) -> int: # make mask reproducible np.random.seed(2 ) __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = int((config.image_size // config.patch_size) ** 2 ) __magic_name__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: __magic_name__ = model_class(_lowerCamelCase ) __magic_name__ = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = model(_lowerCamelCase , noise=_lowerCamelCase ) __magic_name__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_lowerCamelCase ) __magic_name__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config __magic_name__ = model_class.from_config(model.config ) __magic_name__ = new_model(_lowerCamelCase ) # 
Build model new_model.set_weights(model.get_weights() ) __magic_name__ = new_model(_lowerCamelCase , noise=_lowerCamelCase ) self.assert_outputs_same(_lowerCamelCase , _lowerCamelCase ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def __A ( self : Optional[Any] ) -> str: pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def __A ( self : int ) -> Optional[Any]: pass @slow def __A ( self : Any ) -> List[Any]: __magic_name__ = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(_lowerCamelCase ) def __snake_case ( ): '''simple docstring''' __magic_name__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : Dict ) -> List[Any]: return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def __A ( self : Union[str, Any] ) -> Any: # make random mask reproducible across the PT and TF model np.random.seed(2 ) __magic_name__ = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ) __magic_name__ = self.default_image_processor __magic_name__ = prepare_img() __magic_name__ = image_processor(images=_lowerCamelCase , return_tensors="tf" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) __magic_name__ = ViTMAEConfig() __magic_name__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) __magic_name__ = np.random.uniform(size=(1, num_patches) ) # forward pass __magic_name__ = model(**_lowerCamelCase , noise=_lowerCamelCase ) # verify the logits __magic_name__ = tf.convert_to_tensor([1, 1_96, 7_68] ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) __magic_name__ = tf.convert_to_tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
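The recurring `np.random.seed(2)` plus explicit `noise` argument in these tests exists so every framework masks the same patches. A short sketch of the idea (the image/patch geometry below is illustrative):

import numpy as np

np.random.seed(2)                        # make the mask reproducible
num_patches = (224 // 16) ** 2           # illustrative ViT geometry: 196 patches
noise = np.random.uniform(size=(1, num_patches))

# ViTMAE keeps the patches with the smallest noise values; feeding the same
# `noise` array to two models therefore forces identical random masks.
ids_shuffle = np.argsort(noise, axis=1)
print(ids_shuffle[0, :5])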
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('0.8.3'): raise Exception('requires gluonnlp == 0.8.3') if version.parse(mx.__version__) != version.parse('1.5.0'): raise Exception('requires mxnet == 1.5.0') logging.set_verbosity_info() __magic_name__ : Optional[int] =logging.get_logger(__name__) __magic_name__ : Tuple ='The Nymphenburg Palace is a beautiful palace in Munich!' def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str ): '''simple docstring''' __magic_name__ = { "attention_cell": "multi_head", "num_layers": 4, "units": 1024, "hidden_size": 768, "max_length": 512, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1024, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1e-5, "token_type_vocab_size": 2, } __magic_name__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __magic_name__ = BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCamelCase_ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __magic_name__ = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab __magic_name__ = os.path.join(get_home_dir() , "models" ) __magic_name__ = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ ) __magic_name__ = nlp.model.BERTModel( lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , ) original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ ) __magic_name__ = original_bort._collect_params_with_prefix() # Build our config 🤗 __magic_name__ = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": 
predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(lowerCamelCase_ ), } __magic_name__ = BertConfig.from_dict(lowerCamelCase_ ) __magic_name__ = BertForMaskedLM(lowerCamelCase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase_ : Any ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ): __magic_name__ = hf_param.shape __magic_name__ = to_torch(params[gluon_param] ) __magic_name__ = gluon_param.shape assert ( shape_hf == shape_gluon ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param __magic_name__ = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) __magic_name__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) __magic_name__ = check_and_map_params( 
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) __magic_name__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __magic_name__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __magic_name__ = hf_bort_model.bert.encoder.layer[i] # self attention __magic_name__ = layer.attention.self __magic_name__ = check_and_map_params( self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) __magic_name__ = check_and_map_params( self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) __magic_name__ = check_and_map_params( self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) __magic_name__ = check_and_map_params( self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) __magic_name__ = check_and_map_params( self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) __magic_name__ = check_and_map_params( self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output __magic_name__ = layer.attention.output __magic_name__ = check_and_map_params( self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' ) __magic_name__ = check_and_map_params( self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' ) __magic_name__ = check_and_map_params( self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' ) __magic_name__ = check_and_map_params( self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate __magic_name__ = layer.intermediate __magic_name__ = check_and_map_params( intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) __magic_name__ = check_and_map_params( intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output __magic_name__ = layer.output __magic_name__ = check_and_map_params( bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) __magic_name__ = check_and_map_params( bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) __magic_name__ = check_and_map_params( bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) __magic_name__ = check_and_map_params( bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __magic_name__ = RobertaTokenizer.from_pretrained("roberta-base" ) __magic_name__ = tokenizer.encode_plus(lowerCamelCase_ )["input_ids"] # Get gluon output __magic_name__ = mx.nd.array([input_ids] ) __magic_name__ = original_bort(inputs=lowerCamelCase_ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase_ ) __magic_name__ = BertModel.from_pretrained(lowerCamelCase_ ) hf_bort_model.eval() __magic_name__ = tokenizer.encode_plus(lowerCamelCase_ , return_tensors="pt" ) __magic_name__ = hf_bort_model(**lowerCamelCase_ )[0] __magic_name__ = output_gluon[0].asnumpy() __magic_name__ = output_hf[0].detach().numpy() __magic_name__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() __magic_name__ = np.allclose(lowerCamelCase_ , 
lowerCamelCase_ , atol=1e-3 ) if success: print("✔️ Both models output the same tensors" ) else: print("❌ The models do **NOT** output the same tensors" ) print("Absolute difference is:" , lowerCamelCase_ ) if __name__ == "__main__": __magic_name__ : int =argparse.ArgumentParser() # Required parameters parser.add_argument( '--bort_checkpoint_path', default=None, type=str, required=True, help='Path to the official Bort params file.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __magic_name__ : Optional[Any] =parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
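The conversion relies on one safety pattern worth isolating: never copy a source array into a target parameter without first comparing shapes. A hedged sketch of that check (the helper name is illustrative, not from the script above):

import numpy as np
import torch
from torch import nn

def copy_checked(target: nn.Parameter, source: np.ndarray) -> None:
    # Conversion bugs surface as shape mismatches; fail loudly rather than
    # silently writing a wrongly-shaped tensor into the model.
    source_t = torch.as_tensor(source, dtype=target.dtype)
    assert target.shape == source_t.shape, (target.shape, source_t.shape)
    with torch.no_grad():
        target.copy_(source_t)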
'''simple docstring''' __magic_name__ : str ='0.21.0' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
'''Bitwise AND of two non-negative integers, returned as a binary string.'''


def binary_and(a: int, b: int) -> str:
    """
    Return the binary AND of two non-negative integers as a bit string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    >>> binary_and(-1, 0)
    Traceback (most recent call last):
        ...
    ValueError: the value of both inputs must be positive
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
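A quick cross-check (illustrative, not from the original module): the same result falls out of Python's built-in bitwise AND plus zero-padding.

def binary_and_builtin(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    width = max(a.bit_length(), b.bit_length())
    return "0b" + format(a & b, f"0{width}b")

assert binary_and_builtin(37, 50) == "0b100000"  # 37 & 50 == 32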
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __magic_name__ : Union[str, Any] ='\\n Text data.\n Second line of data.' __magic_name__ : Dict ='file' @pytest.fixture(scope="session" ) def __snake_case ( lowerCamelCase_ : Tuple ): '''simple docstring''' __magic_name__ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") __magic_name__ = bytes(lowerCamelCase_ , "utf-8" ) with zstd.open(lowerCamelCase_ , "wb" ) as f: f.write(lowerCamelCase_ ) return path @pytest.fixture def __snake_case ( lowerCamelCase_ : Any ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , lowerCamelCase_ ) , "w" ) as f: f.write(lowerCamelCase_ ) return FILE_PATH @pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] ) def __snake_case ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' __magic_name__ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} __magic_name__ = input_paths[compression_format] __magic_name__ = tmp_path / "cache" __magic_name__ = DownloadConfig(cache_dir=lowerCamelCase_ , extract_compressed_file=lowerCamelCase_ ) __magic_name__ = cached_path(lowerCamelCase_ , download_config=lowerCamelCase_ ) with open(lowerCamelCase_ ) as f: __magic_name__ = f.read() with open(lowerCamelCase_ ) as f: __magic_name__ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" , [True, False] ) @pytest.mark.parametrize("default_cache_dir" , [True, False] ) def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : str , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' __magic_name__ = "custom_cache" __magic_name__ = "custom_extracted_dir" __magic_name__ = tmp_path / "custom_extracted_path" if default_extracted: __magic_name__ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , lowerCamelCase_ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(lowerCamelCase_ ) ) __magic_name__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) __magic_name__ = xz_file __magic_name__ = ( DownloadConfig(extract_compressed_file=lowerCamelCase_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCamelCase_ ) ) __magic_name__ = cached_path(lowerCamelCase_ , download_config=lowerCamelCase_ ) assert Path(lowerCamelCase_ ).parent.parts[-2:] == expected def __snake_case ( lowerCamelCase_ : Tuple ): '''simple docstring''' __magic_name__ = str(Path(lowerCamelCase_ ).resolve() ) assert cached_path(lowerCamelCase_ ) == text_file # relative path __magic_name__ = str(Path(lowerCamelCase_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(lowerCamelCase_ ) == text_file def __snake_case ( lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' __magic_name__ = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(lowerCamelCase_ ): cached_path(lowerCamelCase_ ) # relative path __magic_name__ = 
"./__missing_file__.txt" with pytest.raises(lowerCamelCase_ ): cached_path(lowerCamelCase_ ) def __snake_case ( lowerCamelCase_ : Optional[int] ): '''simple docstring''' __magic_name__ = get_from_cache(F'tmp://{tmpfs_file}' ) with open(lowerCamelCase_ ) as f: __magic_name__ = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' with pytest.raises(lowerCamelCase_ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCamelCase_ ) def __snake_case ( lowerCamelCase_ : List[str] ): '''simple docstring''' __magic_name__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase_ ): http_get("https://huggingface.co" , temp_file=lowerCamelCase_ ) with pytest.raises(lowerCamelCase_ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCamelCase_ ) def __snake_case ( lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' __magic_name__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase_ ): ftp_get("ftp://huggingface.co" , temp_file=lowerCamelCase_ ) with pytest.raises(lowerCamelCase_ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCamelCase_ ) def __snake_case ( lowerCamelCase_ : Any ): '''simple docstring''' __magic_name__ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(lowerCamelCase_ ): fsspec_get("s3://huggingface.co" , temp_file=lowerCamelCase_ ) with pytest.raises(lowerCamelCase_ ): fsspec_head("s3://huggingface.co" )
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __magic_name__ : Tuple =threading.Lock() __magic_name__ : Optional[logging.Handler] =None __magic_name__ : List[str] ={ 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } __magic_name__ : str =logging.WARNING __magic_name__ : Any =True def __snake_case ( ): '''simple docstring''' __magic_name__ = os.getenv("TRANSFORMERS_VERBOSITY" , lowerCamelCase_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, ' F'has to be one of: { ", ".join(log_levels.keys() ) }' ) return _default_log_level def __snake_case ( ): '''simple docstring''' return __name__.split("." )[0] def __snake_case ( ): '''simple docstring''' return logging.getLogger(_get_library_name() ) def __snake_case ( ): '''simple docstring''' global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return __magic_name__ = logging.StreamHandler() # Set sys.stderr as stream. __magic_name__ = sys.stderr.flush # Apply our default configuration to the library root logger. __magic_name__ = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) __magic_name__ = False def __snake_case ( ): '''simple docstring''' global _default_handler with _lock: if not _default_handler: return __magic_name__ = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) __magic_name__ = None def __snake_case ( ): '''simple docstring''' return log_levels def __snake_case ( lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if name is None: __magic_name__ = _get_library_name() _configure_library_root_logger() return logging.getLogger(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def __snake_case ( lowerCamelCase_ : int ): '''simple docstring''' _configure_library_root_logger() _get_library_root_logger().setLevel(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' return set_verbosity(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def __snake_case ( lowerCamelCase_ : logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(lowerCamelCase_ ) def __snake_case ( lowerCamelCase_ : logging.Handler ): '''simple docstring''' _configure_library_root_logger() assert 
handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() __magic_name__ = False def __snake_case ( ): '''simple docstring''' _configure_library_root_logger() __magic_name__ = True def __snake_case ( ): '''simple docstring''' __magic_name__ = _get_library_root_logger().handlers for handler in handlers: __magic_name__ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) handler.setFormatter(lowerCamelCase_ ) def __snake_case ( ): '''simple docstring''' __magic_name__ = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(lowerCamelCase_ ) def __snake_case ( self : Union[str, Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : Any ): '''simple docstring''' __magic_name__ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , lowerCamelCase_ ) if no_advisory_warnings: return self.warning(*lowerCamelCase_ , **lowerCamelCase_ ) __magic_name__ : int =warning_advice @functools.lru_cache(lowerCamelCase_ ) def __snake_case ( self : Dict , *lowerCamelCase_ : int , **lowerCamelCase_ : int ): '''simple docstring''' self.warning(*lowerCamelCase_ , **lowerCamelCase_ ) __magic_name__ : Optional[int] =warning_once class UpperCamelCase_ : """simple docstring""" def __init__( self : int , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ) -> Any: # pylint: disable=unused-argument __magic_name__ = args[0] if args else None def __iter__( self : int ) -> Tuple: return iter(self._iterator ) def __getattr__( self : List[Any] , _lowerCamelCase : int ) -> List[Any]: def empty_fn(*_lowerCamelCase : List[str] , **_lowerCamelCase : List[str] ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Optional[Any] ) -> Any: return self def __exit__( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ) -> Dict: return class UpperCamelCase_ : """simple docstring""" def __call__( self : Any , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Any ) -> List[Any]: if _tqdm_active: return tqdm_lib.tqdm(*_lowerCamelCase , **_lowerCamelCase ) else: return EmptyTqdm(*_lowerCamelCase , **_lowerCamelCase ) def __A ( self : Optional[Any] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Dict ) -> Union[str, Any]: __magic_name__ = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_lowerCamelCase , **_lowerCamelCase ) def __A ( self : str ) -> Any: if _tqdm_active: return tqdm_lib.tqdm.get_lock() __magic_name__ : List[Any] =_tqdm_cls() def __snake_case ( ): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def __snake_case ( ): '''simple docstring''' global _tqdm_active __magic_name__ = True hf_hub_utils.enable_progress_bars() def __snake_case ( ): '''simple docstring''' global _tqdm_active __magic_name__ = False hf_hub_utils.disable_progress_bars()
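Stripped of the transformers specifics, the module above implements the standard "library root logger" recipe. A minimal sketch with an illustrative library name:

import logging
import sys

_handler = logging.StreamHandler(sys.stderr)   # one handler on the library root
_root = logging.getLogger("mylib")             # illustrative library name
_root.addHandler(_handler)
_root.setLevel(logging.WARNING)                # default verbosity

# Child loggers propagate to the root handler, so a single setLevel call
# controls the verbosity of the whole library.
logging.getLogger("mylib.submodule").warning("visible at the default level")
logging.getLogger("mylib.submodule").info("suppressed at the default level")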
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class UpperCamelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[Any]=7 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[Any]=18 , _lowerCamelCase : Union[str, Any]=30 , _lowerCamelCase : Tuple=4_00 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=True , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , _lowerCamelCase : Dict=[0.5, 0.5, 0.5] , ) -> Dict: __magic_name__ = size if size is not None else {"height": 18, "width": 18} __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = num_channels __magic_name__ = image_size __magic_name__ = min_resolution __magic_name__ = max_resolution __magic_name__ = do_resize __magic_name__ = size __magic_name__ = do_normalize __magic_name__ = image_mean __magic_name__ = image_std def __A ( self : int ) -> List[str]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class UpperCamelCase_ ( A , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = DPTImageProcessor if is_vision_available() else None def __A ( self : Dict ) -> Any: __magic_name__ = DPTImageProcessingTester(self ) @property def __A ( self : str ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : Tuple ) -> List[str]: __magic_name__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , "image_mean" ) ) self.assertTrue(hasattr(_lowerCamelCase , "image_std" ) ) self.assertTrue(hasattr(_lowerCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(_lowerCamelCase , "do_resize" ) ) self.assertTrue(hasattr(_lowerCamelCase , "size" ) ) def __A ( self : List[str] ) -> List[Any]: __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) __magic_name__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def __A ( self : Union[str, Any] ) -> List[str]: # Initialize image_processing __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __A ( self : Dict ) -> Optional[Any]: # Initialize image_processing __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __A ( self : Optional[int] ) -> Dict: # Initialize image_processing __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched __magic_name__ = image_processing(_lowerCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ : Union[str, Any] ={'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : str =[ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __magic_name__ : List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
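The `_LazyModule` indirection defers heavy imports until an attribute is first touched. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); the mapping below is illustrative:

import importlib

_IMPORT_STRUCTURE = {"configuration_focalnet": ["FocalNetConfig"]}

def __getattr__(name):  # PEP 562: invoked only for attributes not found normally
    for submodule, exported in _IMPORT_STRUCTURE.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __package__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")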
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __magic_name__ : Optional[Any] =logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class UpperCamelCase_ ( unittest.TestCase ): """simple docstring""" def __A ( self : str , _lowerCamelCase : Path , _lowerCamelCase : Union[str, None] = None , _lowerCamelCase : Union[List[str], None] = None , _lowerCamelCase : Union[str, List[str], None] = None , _lowerCamelCase : bool = True , ) -> Optional[Any]: __magic_name__ = [file for file in os.listdir(_lowerCamelCase ) if os.path.isfile(os.path.join(_lowerCamelCase , _lowerCamelCase ) )] if identifier is not None: __magic_name__ = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(_lowerCamelCase , _lowerCamelCase ): for n_ in n_identifier: __magic_name__ = [file for file in files if n_ not in file] else: __magic_name__ = [file for file in files if n_identifier not in file] __magic_name__ = ignore_files or [] ignore_files.append("__init__.py" ) __magic_name__ = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing" , _lowerCamelCase ) if only_modules: __magic_name__ = file.split("." )[0] try: __magic_name__ = getattr(_lowerCamelCase , _lowerCamelCase ) __magic_name__ = doctest.DocTestSuite(_lowerCamelCase ) __magic_name__ = unittest.TextTestRunner().run(_lowerCamelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(f'{module_identifier} is not a module.' ) else: __magic_name__ = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def __A ( self : Dict ) -> Any: __magic_name__ = Path("src/transformers" ) __magic_name__ = "modeling" __magic_name__ = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase , ignore_files=_lowerCamelCase ) def __A ( self : Dict ) -> Optional[Any]: __magic_name__ = Path("src/transformers" ) __magic_name__ = "tokenization" self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase ) def __A ( self : List[Any] ) -> int: __magic_name__ = Path("src/transformers" ) __magic_name__ = "configuration" self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase ) def __A ( self : List[str] ) -> int: __magic_name__ = Path("src/transformers" ) __magic_name__ = ["configuration", "modeling", "tokenization"] self.analyze_directory(_lowerCamelCase , n_identifier=_lowerCamelCase ) def __A ( self : str ) -> Union[str, Any]: __magic_name__ = Path("docs/source" ) __magic_name__ = ["favicon.ico"] self.analyze_directory(_lowerCamelCase , ignore_files=_lowerCamelCase , only_modules=_lowerCamelCase )
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __magic_name__ : Optional[Any] ={ 'configuration_longformer': [ 'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongformerConfig', 'LongformerOnnxConfig', ], 'tokenization_longformer': ['LongformerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : int =['LongformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : Dict =[ 'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongformerForMaskedLM', 'LongformerForMultipleChoice', 'LongformerForQuestionAnswering', 'LongformerForSequenceClassification', 'LongformerForTokenClassification', 'LongformerModel', 'LongformerPreTrainedModel', 'LongformerSelfAttention', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : Tuple =[ 'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLongformerForMaskedLM', 'TFLongformerForMultipleChoice', 'TFLongformerForQuestionAnswering', 'TFLongformerForSequenceClassification', 'TFLongformerForTokenClassification', 'TFLongformerModel', 'TFLongformerPreTrainedModel', 'TFLongformerSelfAttention', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys __magic_name__ : int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
__magic_name__ : List[Any] =logging.get_logger(__name__)

__magic_name__ : str ={
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'label_embs_concat': 'label_embeddings_concat',
    'mask_emb': 'masked_spec_embed',
    'spk_proj': 'speaker_proj',
}
__magic_name__ : Optional[Any] =[
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'label_embeddings_concat',
    'speaker_proj',
    'layer_norm_for_extract',
]


def __snake_case ( lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ):
    '''simple docstring'''
    for attribute in key.split("." ):
        __magic_name__ = getattr(lowerCamelCase_ , lowerCamelCase_ )

    if weight_type is not None:
        __magic_name__ = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
    else:
        __magic_name__ = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )

    if weight_type == "weight":
        __magic_name__ = value
    elif weight_type == "weight_g":
        __magic_name__ = value
    elif weight_type == "weight_v":
        __magic_name__ = value
    elif weight_type == "bias":
        __magic_name__ = value
    else:
        __magic_name__ = value

    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )


def __snake_case ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple ):
    '''simple docstring'''
    __magic_name__ = []
    __magic_name__ = fairseq_model.state_dict()

    __magic_name__ = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        __magic_name__ = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == "group" , )
            __magic_name__ = True
        else:
            for key, mapped_key in MAPPING.items():
                __magic_name__ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    __magic_name__ = True
                    if "*" in mapped_key:
                        __magic_name__ = name.split(lowerCamelCase_ )[0].split("." )[-2]
                        __magic_name__ = mapped_key.replace("*" , lowerCamelCase_ )
                    if "weight_g" in name:
                        __magic_name__ = "weight_g"
                    elif "weight_v" in name:
                        __magic_name__ = "weight_v"
                    elif "bias" in name:
                        __magic_name__ = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __magic_name__ = "weight"
                    else:
                        __magic_name__ = None
                    set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
                continue
        if not is_used:
            unused_weights.append(lowerCamelCase_ )

    logger.warning(F'Unused weights: {unused_weights}' )


def __snake_case ( lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] ):
    '''simple docstring'''
    __magic_name__ = full_name.split("conv_layers." )[-1]
    __magic_name__ = name.split("." )
    __magic_name__ = int(items[0] )
    __magic_name__ = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            __magic_name__ = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            __magic_name__ = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
            __magic_name__ = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'{full_name} has size {value.shape}, but'
                    F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
            __magic_name__ = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(lowerCamelCase_ )


@torch.no_grad()
def __snake_case ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : List[Any]=True ):
    '''simple docstring'''
    if config_path is not None:
        __magic_name__ = UniSpeechSatConfig.from_pretrained(lowerCamelCase_ )
    else:
        __magic_name__ = UniSpeechSatConfig()

    __magic_name__ = ""

    if is_finetuned:
        __magic_name__ = UniSpeechSatForCTC(lowerCamelCase_ )
    else:
        __magic_name__ = UniSpeechSatForPreTraining(lowerCamelCase_ )

    __magic_name__ , __magic_name__ , __magic_name__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    __magic_name__ = model[0].eval()

    recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ )

    hf_wavavec.save_pretrained(lowerCamelCase_ )


if __name__ == "__main__":
    __magic_name__ : Union[str, Any] =argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    __magic_name__ : Tuple =parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
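# --- Added usage note (the script filename and all paths below are
# hypothetical, not from the original file): the converter above is driven
# from the command line, roughly like:
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-converted
#
# `--not_finetuned` would be added when converting a pre-training checkpoint
# rather than a CTC fine-tuned one.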
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    __magic_name__ : str ={
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    __magic_name__ : Tuple ={
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def __snake_case ( lowerCamelCase_ : Optional[Any] ):
    '''simple docstring'''
    __magic_name__ = (images / 2 + 0.5).clamp(0 , 1 )
    __magic_name__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    __magic_name__ = numpy_to_pil(lowerCamelCase_ )
    return images


def __snake_case ( lowerCamelCase_ : Optional[Any] ):
    '''simple docstring'''
    if images.ndim == 3:
        __magic_name__ = images[None, ...]
    __magic_name__ = (images * 255).round().astype("uint8" )

    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        __magic_name__ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        __magic_name__ = [Image.fromarray(lowerCamelCase_ ) for image in images]

    return pil_images
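# --- Added usage sketch (shapes and values are mine, not from the original
# file): the second helper above expects an NHWC float array with values in
# [0, 1] and returns a list of PIL images.
import numpy as np

batch = np.random.rand(2, 8, 8, 3).astype("float32")  # two 8x8 RGB images
pil_images = numpy_to_pil(batch)  # -> list of two PIL.Image.Image objects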
'''simple docstring'''
from jiwer import compute_measures

import datasets


__magic_name__ : Optional[int] ='\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'

__magic_name__ : Any ='\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'

__magic_name__ : Union[str, Any] ='\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    """simple docstring"""

    def __A ( self : Dict ) -> Any:
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ] , )

    def __A ( self : Dict , _lowerCamelCase : Any=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[Any]=False ) -> Optional[int]:
        if concatenate_texts:
            return compute_measures(_lowerCamelCase , _lowerCamelCase )["wer"]
        else:
            __magic_name__ = 0
            __magic_name__ = 0
            for prediction, reference in zip(_lowerCamelCase , _lowerCamelCase ):
                __magic_name__ = compute_measures(_lowerCamelCase , _lowerCamelCase )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
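# --- Added worked check (the alignment counts are my reading of the jiwer
# alignment, not stated in the original): in the docstring example,
# "this is the prediction" vs "this is the reference" contributes 1
# substitution over 4 reference words, and "there is an other sample" vs
# "there is another one" contributes 2 substitutions + 1 insertion over 4,
# giving (1 + 3) / (4 + 4) = 0.5 -- the value shown in the docstring example.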
'''simple docstring'''
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


__magic_name__ : Optional[Any] =logging.get_logger(__name__)


@add_end_docstrings(
    A , r'''
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
            resulting token will be used (with a warning, and that might be slower).
    ''' , )
class UpperCamelCase_ ( A ):
    """simple docstring"""

    def __A ( self : Any , _lowerCamelCase : GenericTensor ) -> np.ndarray:
        if self.framework == "tf":
            __magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            __magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index

    def __A ( self : str , _lowerCamelCase : GenericTensor ) -> np.ndarray:
        __magic_name__ = self.get_masked_index(_lowerCamelCase )
        __magic_name__ = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )

    def __A ( self : int , _lowerCamelCase : GenericTensor ) -> Any:
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(_lowerCamelCase )

    def __A ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Any=None , **_lowerCamelCase : List[str] ) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            __magic_name__ = self.framework
        __magic_name__ = self.tokenizer(_lowerCamelCase , return_tensors=_lowerCamelCase )
        self.ensure_exactly_one_mask_token(_lowerCamelCase )
        return model_inputs

    def __A ( self : List[str] , _lowerCamelCase : int ) -> List[Any]:
        __magic_name__ = self.model(**_lowerCamelCase )
        __magic_name__ = model_inputs["input_ids"]
        return model_outputs

    def __A ( self : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Dict=None ) -> Dict:
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            __magic_name__ = target_ids.shape[0]
        __magic_name__ = model_outputs["input_ids"][0]
        __magic_name__ = model_outputs["logits"]

        if self.framework == "tf":
            __magic_name__ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]

            __magic_name__ = outputs.numpy()

            __magic_name__ = outputs[0, masked_index, :]
            __magic_name__ = stable_softmax(_lowerCamelCase , axis=-1 )
            if target_ids is not None:
                __magic_name__ = tf.gather_nd(tf.squeeze(_lowerCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
                __magic_name__ = tf.expand_dims(_lowerCamelCase , 0 )

            __magic_name__ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase )
            __magic_name__ , __magic_name__ = topk.values.numpy(), topk.indices.numpy()
        else:
            __magic_name__ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowerCamelCase ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample

            __magic_name__ = outputs[0, masked_index, :]
            __magic_name__ = logits.softmax(dim=-1 )
            if target_ids is not None:
                __magic_name__ = probs[..., target_ids]

            __magic_name__ , __magic_name__ = probs.topk(_lowerCamelCase )

        __magic_name__ = []
        __magic_name__ = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            __magic_name__ = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                __magic_name__ = input_ids.numpy().copy()
                if target_ids is not None:
                    __magic_name__ = target_ids[p].tolist()

                __magic_name__ = p
                # Filter padding out:
                __magic_name__ = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                __magic_name__ = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
                __magic_name__ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(_lowerCamelCase )
            result.append(_lowerCamelCase )

        if single_mask:
            return result[0]
        return result

    def __A ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None ) -> List[str]:
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            __magic_name__ = [targets]
        try:
            __magic_name__ = self.tokenizer.get_vocab()
        except Exception:
            __magic_name__ = {}
        __magic_name__ = []
        for target in targets:
            __magic_name__ = vocab.get(_lowerCamelCase , _lowerCamelCase )
            if id_ is None:
                __magic_name__ = self.tokenizer(
                    _lowerCamelCase , add_special_tokens=_lowerCamelCase , return_attention_mask=_lowerCamelCase , return_token_type_ids=_lowerCamelCase , max_length=1 , truncation=_lowerCamelCase , )["input_ids"]
                if len(_lowerCamelCase ) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                __magic_name__ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
            target_ids.append(id_ )
        __magic_name__ = list(set(_lowerCamelCase ) )
        if len(_lowerCamelCase ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        __magic_name__ = np.array(_lowerCamelCase )
        return target_ids

    def __A ( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : int=None ) -> Tuple:
        __magic_name__ = {}
        if targets is not None:
            __magic_name__ = self.get_target_ids(_lowerCamelCase , _lowerCamelCase )
            __magic_name__ = target_ids
        if top_k is not None:
            __magic_name__ = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params

    def __call__( self : int , _lowerCamelCase : Any , *_lowerCamelCase : str , **_lowerCamelCase : int ) -> Optional[int]:
        __magic_name__ = super().__call__(_lowerCamelCase , **_lowerCamelCase )
        if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) == 1:
            return outputs[0]
        return outputs
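# --- Added usage sketch (the model name is an assumption; any fill-mask
# checkpoint would do): this pipeline class is normally reached through
# `transformers.pipeline`, which wires up the pre/forward/postprocess steps above.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")
predictions = fill_mask("Paris is the <mask> of France.", top_k=3)
# Each entry carries "score", "token", "token_str" and "sequence", matching
# the dict built in the postprocess step above.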
'''simple docstring'''
from math import ceil, sqrt


def __snake_case ( lowerCamelCase_ : int = 100_0000 ):
    '''simple docstring'''
    __magic_name__ = 0

    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            __magic_name__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            __magic_name__ = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(F'''{solution() = }''')
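# --- Added cross-check sketch (the helper name is mine, not from the original
# file): a direct count of hollow square laminae using at most `limit` tiles,
# handy for sanity-checking the closed-form loop above on small limits. A
# lamina with outer width t and hole width h (same parity, 1 <= h <= t - 2)
# uses t*t - h*h tiles.
def brute_force_laminae(limit: int) -> int:
    count = 0
    outer = 3
    while 4 * outer - 4 <= limit:  # thinnest lamina for this outer width
        for hole in range(outer - 2, 0, -2):  # hole shares parity with outer
            if outer * outer - hole * hole <= limit:
                count += 1
        outer += 1
    return count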
'''simple docstring'''
from __future__ import annotations


def __snake_case ( lowerCamelCase_ : list[int] , lowerCamelCase_ : int ):
    '''simple docstring'''
    if len(lowerCamelCase_ ) < k or k < 0:
        raise ValueError("Invalid Input" )

    __magic_name__ = __magic_name__ = sum(array[:k] )
    for i in range(len(lowerCamelCase_ ) - k ):
        __magic_name__ = current_sum - array[i] + array[i + k]
        __magic_name__ = max(lowerCamelCase_ , lowerCamelCase_ )

    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    __magic_name__ : List[str] =[randint(-10_00, 10_00) for i in range(1_00)]
    __magic_name__ : List[str] =randint(0, 1_10)
    print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
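# --- Added worked example (the values are mine): for array = [1, 4, 2, 10, 2, 3, 1, 0, 20]
# and k = 4, the sliding-window sums are 17, 18, 17, 16, 6, 24, so the function
# above returns 24; each step only subtracts the outgoing element and adds the
# incoming one instead of re-summing the whole window.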
'''simple docstring'''
def __snake_case ( lowerCamelCase_ : list ):
    '''simple docstring'''
    __magic_name__ = 0
    while len(lowerCamelCase_ ) > 1:
        __magic_name__ = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            __magic_name__ = files.index(min(lowerCamelCase_ ) )
            temp += files[min_index]
            files.pop(lowerCamelCase_ )
        files.append(lowerCamelCase_ )
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
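# --- Added worked example (the numbers are mine): for files of sizes
# [2, 3, 4] the greedy loop above first merges 2 and 3 at cost 5, then 5 and 4
# at cost 9, so the optimal merge cost returned is 5 + 9 == 14.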
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__magic_name__ : int =logging.get_logger(__name__)

__magic_name__ : List[Any] ={}


class UpperCamelCase_ ( A ):
    """simple docstring"""

    UpperCAmelCase__ : int = '''llama'''
    UpperCAmelCase__ : Any = ['''past_key_values''']

    def __init__( self : List[Any] , _lowerCamelCase : List[Any]=3_20_00 , _lowerCamelCase : Optional[Any]=40_96 , _lowerCamelCase : Tuple=1_10_08 , _lowerCamelCase : List[Any]=32 , _lowerCamelCase : Tuple=32 , _lowerCamelCase : List[str]=None , _lowerCamelCase : str="silu" , _lowerCamelCase : Optional[Any]=20_48 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : Union[str, Any]=1e-6 , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Dict=0 , _lowerCamelCase : int=1 , _lowerCamelCase : str=2 , _lowerCamelCase : List[Any]=1 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : List[str]=None , **_lowerCamelCase : List[Any] , ) -> Any:
        __magic_name__ = vocab_size
        __magic_name__ = max_position_embeddings
        __magic_name__ = hidden_size
        __magic_name__ = intermediate_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            __magic_name__ = num_attention_heads

        __magic_name__ = num_key_value_heads
        __magic_name__ = hidden_act
        __magic_name__ = initializer_range
        __magic_name__ = rms_norm_eps
        __magic_name__ = pretraining_tp
        __magic_name__ = use_cache
        __magic_name__ = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase , )

    def __A ( self : Union[str, Any] ) -> List[Any]:
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , _lowerCamelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}' )
        __magic_name__ = self.rope_scaling.get("type" , _lowerCamelCase )
        __magic_name__ = self.rope_scaling.get("factor" , _lowerCamelCase )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(_lowerCamelCase , _lowerCamelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
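# --- Added usage sketch (the public class name LlamaConfig is an assumption
# based on the '''llama''' model_type above): the validator accepts, e.g.,
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#
# and raises ValueError for an unknown scaling type or a factor <= 1.0.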
'''simple docstring'''
__magic_name__ : List[Any] ={
    'meter': 'm',
    'kilometer': 'km',
    'megametre': 'Mm',
    'gigametre': 'Gm',
    'terametre': 'Tm',
    'petametre': 'Pm',
    'exametre': 'Em',
    'zettametre': 'Zm',
    'yottametre': 'Ym',
}
# Exponent of the factor(meter)
__magic_name__ : int ={
    'm': 0,
    'km': 3,
    'Mm': 6,
    'Gm': 9,
    'Tm': 12,
    'Pm': 15,
    'Em': 18,
    'Zm': 21,
    'Ym': 24,
}


def __snake_case ( lowerCamelCase_ : float , lowerCamelCase_ : str , lowerCamelCase_ : str ):
    '''simple docstring'''
    __magic_name__ = from_type.lower().strip("s" )
    __magic_name__ = to_type.lower().strip("s" )

    __magic_name__ = UNIT_SYMBOL.get(lowerCamelCase_ , lowerCamelCase_ )
    __magic_name__ = UNIT_SYMBOL.get(lowerCamelCase_ , lowerCamelCase_ )

    if from_sanitized not in METRIC_CONVERSION:
        __magic_name__ = (
            F'Invalid \'from_type\' value: {from_type!r}.\n'
            F'Conversion abbreviations are: {", ".join(lowerCamelCase_ )}'
        )
        raise ValueError(lowerCamelCase_ )
    if to_sanitized not in METRIC_CONVERSION:
        __magic_name__ = (
            F'Invalid \'to_type\' value: {to_type!r}.\n'
            F'Conversion abbreviations are: {", ".join(lowerCamelCase_ )}'
        )
        raise ValueError(lowerCamelCase_ )
    __magic_name__ = METRIC_CONVERSION[from_sanitized]
    __magic_name__ = METRIC_CONVERSION[to_sanitized]
    __magic_name__ = 1

    if from_exponent > to_exponent:
        __magic_name__ = from_exponent - to_exponent
    else:
        __magic_name__ = -(to_exponent - from_exponent)

    return value * pow(10 , lowerCamelCase_ )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
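# --- Added worked example (the values are mine): converting 1 "kilometer" to
# "meter" resolves the unit names to symbols 'km' and 'm', hence exponents 3
# and 0, so the function above returns 1 * 10**3 == 1000; an unknown unit name
# raises a ValueError listing the accepted abbreviations.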
'''simple docstring'''
__magic_name__ : Dict =8.3_1_4_4_6_2  # Unit - J mol-1 K-1


def __snake_case ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float ):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def __snake_case ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float ):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value." )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
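# --- Added worked example (the numbers are mine): for 1 mole at 300 K in a
# volume of 1 m^3, the first helper above returns 1 * 300 * 8.314462 / 1,
# i.e. about 2494.34 pascals -- a direct rearrangement of the ideal gas law
# PV = nRT to P = nRT / V.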
'''simple docstring'''
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class UpperCamelCase_ ( A , unittest.TestCase ):
    """simple docstring"""

    UpperCAmelCase__ : Union[str, Any] = XLMTokenizer
    UpperCAmelCase__ : Any = False

    def __A ( self : int ) -> Union[str, Any]:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __magic_name__ = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        __magic_name__ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
        __magic_name__ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(_lowerCamelCase ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(_lowerCamelCase ) )

    def __A ( self : Optional[int] , _lowerCamelCase : Optional[Any] ) -> List[Any]:
        __magic_name__ = "lower newer"
        __magic_name__ = "lower newer"
        return input_text, output_text

    def __A ( self : int ) -> List[str]:
        __magic_name__ = XLMTokenizer(self.vocab_file , self.merges_file )

        __magic_name__ = "lower"
        __magic_name__ = ["low", "er</w>"]
        __magic_name__ = tokenizer.tokenize(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )

        __magic_name__ = tokens + ["<unk>"]
        __magic_name__ = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )

    @slow
    def __A ( self : List[str] ) -> List[Any]:
        __magic_name__ = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )

        __magic_name__ = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCamelCase )
        __magic_name__ = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCamelCase )

        __magic_name__ = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
        __magic_name__ = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


__magic_name__ : List[Any] =logging.getLogger(__name__)


class UpperCamelCase_ ( A ):
    """simple docstring"""

    def __init__( self : Optional[Any] , _lowerCamelCase : str=-1 ) -> List[str]:
        # in NER datasets, the last column is usually reserved for NER label
        __magic_name__ = label_idx

    def __A ( self : Any , _lowerCamelCase : str , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            __magic_name__ = mode.value
        __magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
        __magic_name__ = 1
        __magic_name__ = []
        with open(_lowerCamelCase , encoding="utf-8" ) as f:
            __magic_name__ = []
            __magic_name__ = []
            for line in f:
                if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
                        guid_index += 1
                        __magic_name__ = []
                        __magic_name__ = []
                else:
                    __magic_name__ = line.split(" " )
                    words.append(splits[0] )
                    if len(_lowerCamelCase ) > 1:
                        labels.append(splits[self.label_idx].replace("\n" , "" ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O" )
            if words:
                examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
        return examples

    def __A ( self : Optional[Any] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Union[str, Any]:
        __magic_name__ = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
                writer.write(_lowerCamelCase )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                __magic_name__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
                writer.write(_lowerCamelCase )
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )

    def __A ( self : Tuple , _lowerCamelCase : str ) -> List[str]:
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                __magic_name__ = f.read().splitlines()
            if "O" not in labels:
                __magic_name__ = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class UpperCamelCase_ ( A ):
    """simple docstring"""

    def __init__( self : int ) -> str:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def __A ( self : int , _lowerCamelCase : str ) -> List[str]:
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                __magic_name__ = f.read().splitlines()
            if "O" not in labels:
                __magic_name__ = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class UpperCamelCase_ ( A ):
    """simple docstring"""

    def __A ( self : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[Split, str] ) -> List[InputExample]:
        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            __magic_name__ = mode.value
        __magic_name__ = os.path.join(_lowerCamelCase , f'{mode}.txt' )
        __magic_name__ = 1
        __magic_name__ = []

        with open(_lowerCamelCase , encoding="utf-8" ) as f:
            for sentence in parse_incr(_lowerCamelCase ):
                __magic_name__ = []
                __magic_name__ = []
                for token in sentence:
                    words.append(token["form"] )
                    labels.append(token["upos"] )
                assert len(_lowerCamelCase ) == len(_lowerCamelCase )
                if words:
                    examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=_lowerCamelCase , labels=_lowerCamelCase ) )
                    guid_index += 1
        return examples

    def __A ( self : Optional[int] , _lowerCamelCase : TextIO , _lowerCamelCase : TextIO , _lowerCamelCase : List ) -> Any:
        __magic_name__ = 0
        for sentence in parse_incr(_lowerCamelCase ):
            __magic_name__ = preds_list[example_id]
            __magic_name__ = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(_lowerCamelCase )
            example_id += 1

    def __A ( self : Dict , _lowerCamelCase : str ) -> List[str]:
        if path:
            with open(_lowerCamelCase , "r" ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
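# --- Added usage note (the file layout is inferred from the parsing code
# above, not stated in the original): the first reader expects CoNLL-style
# text where each line is "token label", sentences are separated by blank
# lines, and documents start with "-DOCSTART-", e.g.
#
#     EU B-ORG
#     rejects O
#     German B-MISC
#
# Each sentence then becomes one InputExample with parallel words/labels lists;
# the third class reads CoNLL-U files via `parse_incr` and uses UPOS tags as labels.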