Dataset schema (string columns show min/max length; integer columns show value range):

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| code | string (length) | 87 | 55.2k |
| code_codestyle | int64 | 0 | 349 |
| style_context | string (length) | 135 | 49.1k |
| style_context_codestyle | int64 | 0 | 349 |
| label | int64 | 0 | 1 |
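A minimal sketch of how rows with this schema could be loaded for inspection, assuming the 🤗 `datasets` library; the repo id below is a placeholder, since the actual dataset path is not given here, and the meaning of `label` is assumed rather than documented:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical repo id -- substitute the real one.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # source string and its style id (0-349)
print(len(row["style_context"]), row["style_context_codestyle"])  # context string and its style id
print(row["label"])  # 0/1; presumably whether the two samples share a code style (assumption)
```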
Row 1

code (code_codestyle: 340):

```python
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpta import TFGPTaTokenizer

a_ = ['''gpt2''']
a_ = '''gpt2'''

if is_tf_available():

    class lowercase__(tf.Module):
        def __init__(self, __UpperCAmelCase) -> int:
            '''simple docstring'''
            super().__init__()
            lowerCAmelCase__ = tokenizer
            lowerCAmelCase__ = AutoConfig.from_pretrained(__a)
            lowerCAmelCase__ = TFGPTaLMHeadModel.from_config(__a)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def UpperCAmelCase(self, __UpperCAmelCase) -> Dict:
            '''simple docstring'''
            lowerCAmelCase__ = self.tokenizer(__a)
            lowerCAmelCase__ = tokenized["input_ids"].to_tensor()
            lowerCAmelCase__ = tf.cast(input_ids_dense > 0, tf.intaa)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            lowerCAmelCase__ = self.model(input_ids=__a, attention_mask=__a)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class lowercase__(unittest.TestCase):
    def UpperCAmelCase(self) -> Any:
        '''simple docstring'''
        super().setUp()
        lowerCAmelCase__ = [GPTaTokenizer.from_pretrained(__a) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        lowerCAmelCase__ = [TFGPTaTokenizer.from_pretrained(__a) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        lowerCAmelCase__ = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        lowerCAmelCase__ = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def UpperCAmelCase(self) -> Dict:
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                lowerCAmelCase__ = tokenizer([test_inputs], return_tensors="tf")
                lowerCAmelCase__ = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    lowerCAmelCase__ = python_outputs[key].numpy()
                    lowerCAmelCase__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(__a, tf.intaa) == tf_outputs_values))

    @slow
    def UpperCAmelCase(self) -> List[Any]:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            lowerCAmelCase__ = tf.function(__a)
            for test_inputs in self.test_sentences:
                lowerCAmelCase__ = tf.constant(__a)
                lowerCAmelCase__ = compiled_tokenizer(__a)
                lowerCAmelCase__ = tf_tokenizer(__a)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def UpperCAmelCase(self) -> str:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            lowerCAmelCase__ = ModelToSave(tokenizer=__a)
            lowerCAmelCase__ = tf.convert_to_tensor([self.test_sentences[0]])
            lowerCAmelCase__ = model.serving(__a)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                lowerCAmelCase__ = Path(__a) / "saved.model"
                tf.saved_model.save(__a, __a, signatures={"serving_default": model.serving})
                lowerCAmelCase__ = tf.saved_model.load(__a)
                lowerCAmelCase__ = loaded_model.signatures["serving_default"](__a)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def UpperCAmelCase(self) -> Union[str, Any]:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            lowerCAmelCase__ = tf.convert_to_tensor([self.test_sentences[0]])
            lowerCAmelCase__ = tf_tokenizer(__a)  # Build model with some sample inputs
            lowerCAmelCase__ = tf_tokenizer.get_config()
            lowerCAmelCase__ = TFGPTaTokenizer.from_config(__a)
            lowerCAmelCase__ = model_from_config(__a)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def UpperCAmelCase(self) -> int:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            lowerCAmelCase__ = 123123
            for max_length in [3, 5, 1024]:
                lowerCAmelCase__ = tf.convert_to_tensor([self.test_sentences[0]])
                lowerCAmelCase__ = tf_tokenizer(__a, max_length=__a)
                lowerCAmelCase__ = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
```

code_codestyle: 340
style_context (style_context_codestyle: 36):

```python
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig

if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

_snake_case = logging.get_logger(__name__)


class UpperCAmelCase_(a):
    lowerCamelCase__ = 'vision-encoder-decoder'
    lowerCamelCase__ = True

    def __init__(self, **__a):
        '''simple docstring'''
        super().__init__(**__a)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")
        _lowerCAmelCase : str = kwargs.pop("encoder")
        _lowerCAmelCase : Any = encoder_config.pop("model_type")
        _lowerCAmelCase : str = kwargs.pop("decoder")
        _lowerCAmelCase : List[str] = decoder_config.pop("model_type")
        _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
        _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a)
        _lowerCAmelCase : Optional[int] = True

    @classmethod
    def snake_case__(cls, __a, __a, **__a):
        '''simple docstring'''
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        _lowerCAmelCase : Optional[Any] = True
        _lowerCAmelCase : str = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a)

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : int = copy.deepcopy(self.__dict__)
        _lowerCAmelCase : List[str] = self.encoder.to_dict()
        _lowerCAmelCase : List[str] = self.decoder.to_dict()
        _lowerCAmelCase : Any = self.__class__.model_type
        return output


class UpperCAmelCase_(a):
    lowerCamelCase__ = version.parse('1.11')

    @property
    def snake_case__(self):
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def snake_case__(self):
        '''simple docstring'''
        return 1E-4

    @property
    def snake_case__(self):
        '''simple docstring'''
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class UpperCAmelCase_(a):
    @property
    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = OrderedDict()
        _lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"}
        _lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        _lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def snake_case__(self, __a, __a=-1, __a=-1, __a=False, __a=None):
        '''simple docstring'''
        import torch

        _lowerCAmelCase : Optional[Any] = OrderedDict()
        _lowerCAmelCase : List[str] = super().generate_dummy_inputs(
            __a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a)
        _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape
        _lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
        _lowerCAmelCase : List[str] = dummy_input.pop("input_ids")
        _lowerCAmelCase : List[str] = dummy_input.pop("attention_mask")
        _lowerCAmelCase : Optional[int] = torch.zeros(__a)
        return common_inputs


class UpperCAmelCase_(a):
    @property
    def snake_case__(self):
        '''simple docstring'''
        pass

    def snake_case__(self, __a):
        '''simple docstring'''
        return VisionEncoderDecoderEncoderOnnxConfig(__a)

    def snake_case__(self, __a, __a, __a="default"):
        '''simple docstring'''
        _lowerCAmelCase : Dict = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
```

label: 0
Row 2

code (code_codestyle: 117):

```python
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures

snake_case__ : List[Any] = logging.get_logger(__name__)


@dataclass
class A_:
    lowerCAmelCase__ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys())})
    lowerCAmelCase__ = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""})
    lowerCAmelCase__ = field(
        default=128,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        },
    )
    lowerCAmelCase__ = field(
        default=_lowerCamelCase, metadata={"""help""": """Overwrite the cached training and evaluation sets"""})

    def _lowerCAmelCase(self: Optional[int]) -> Any:
        __A = self.task_name.lower()


class A_(_lowerCamelCase):
    lowerCAmelCase__ = """train"""
    lowerCAmelCase__ = """dev"""
    lowerCAmelCase__ = """test"""


class A_(_lowerCamelCase):
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = 42

    def __init__(self: Tuple, _UpperCamelCase: int, _UpperCamelCase: Optional[Any], _UpperCamelCase: int = None,
                 _UpperCamelCase: Dict = Split.train, _UpperCamelCase: str = None) -> Optional[int]:
        warnings.warn(
            '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
            '''library. You can have a look at this example script for pointers: '''
            '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''',
            __a,
        )
        __A = args
        __A = glue_processors[args.task_name]()
        __A = glue_output_modes[args.task_name]
        if isinstance(__a, __a):
            try:
                __A = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''')
        # Load data features from cache or dataset file
        __A = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""",
        )
        __A = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            __A = label_list[2], label_list[1]
        __A = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        __A = cached_features_file + ".lock"
        with FileLock(__a):
            if os.path.exists(__a) and not args.overwrite_cache:
                __A = time.time()
                __A = torch.load(__a)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start)
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    __A = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    __A = self.processor.get_test_examples(args.data_dir)
                else:
                    __A = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    __A = examples[:limit_length]
                __A = glue_convert_examples_to_features(
                    __a, __a, max_length=args.max_seq_length, label_list=__a, output_mode=self.output_mode)
                __A = time.time()
                torch.save(self.features, __a)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")

    def __len__(self: Union[str, Any]) -> Tuple:
        return len(self.features)

    def __getitem__(self: int, _UpperCamelCase: Dict) -> Union[str, Any]:
        return self.features[i]

    def _lowerCAmelCase(self: Union[str, Any]) -> List[str]:
        return self.label_list
```

code_codestyle: 117
style_context (style_context_codestyle: 36):

```python
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy

if is_flax_available():
    import jax.numpy as jnp


class UpperCAmelCase_(a):
    def __get__(self, __a, __a=None):
        '''simple docstring'''
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        _lowerCAmelCase : List[Any] = "__cached_" + self.fget.__name__
        _lowerCAmelCase : Dict = getattr(__a, __a, __a)
        if cached is None:
            _lowerCAmelCase : str = self.fget(__a)
            setattr(__a, __a, __a)
        return cached


def A(_lowerCamelCase):
    '''simple docstring'''
    _lowerCAmelCase : Any = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"invalid truth value {val!r}")


def A(_lowerCamelCase):
    '''simple docstring'''
    if is_torch_fx_proxy(_lowerCamelCase):
        return True
    if is_torch_available():
        import torch

        if isinstance(_lowerCamelCase, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(_lowerCamelCase, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(_lowerCamelCase, (jnp.ndarray, Tracer)):
            return True
    return isinstance(_lowerCamelCase, np.ndarray)


def A(_lowerCamelCase):
    '''simple docstring'''
    return isinstance(_lowerCamelCase, np.ndarray)


def A(_lowerCamelCase):
    '''simple docstring'''
    return _is_numpy(_lowerCamelCase)


def A(_lowerCamelCase):
    '''simple docstring'''
    import torch

    return isinstance(_lowerCamelCase, torch.Tensor)


def A(_lowerCamelCase):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch(_lowerCamelCase)


def A(_lowerCamelCase):
    '''simple docstring'''
    import torch

    return isinstance(_lowerCamelCase, torch.device)


def A(_lowerCamelCase):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_device(_lowerCamelCase)


def A(_lowerCamelCase):
    '''simple docstring'''
    import torch

    if isinstance(_lowerCamelCase, _lowerCamelCase):
        if hasattr(_lowerCamelCase, _lowerCamelCase):
            _lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase, _lowerCamelCase)
        else:
            return False
    return isinstance(_lowerCamelCase, torch.dtype)


def A(_lowerCamelCase):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase)


def A(_lowerCamelCase):
    '''simple docstring'''
    import tensorflow as tf

    return isinstance(_lowerCamelCase, tf.Tensor)


def A(_lowerCamelCase):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase)


def A(_lowerCamelCase):
    '''simple docstring'''
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(_lowerCamelCase, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(_lowerCamelCase)
    return type(_lowerCamelCase) == tf.Tensor


def A(_lowerCamelCase):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase)


def A(_lowerCamelCase):
    '''simple docstring'''
    import jax.numpy as jnp  # noqa: F811

    return isinstance(_lowerCamelCase, jnp.ndarray)


def A(_lowerCamelCase):
    '''simple docstring'''
    return False if not is_flax_available() else _is_jax(_lowerCamelCase)


def A(_lowerCamelCase):
    '''simple docstring'''
    if isinstance(_lowerCamelCase, (dict, UserDict)):
        return {k: to_py_obj(_lowerCamelCase) for k, v in obj.items()}
    elif isinstance(_lowerCamelCase, (list, tuple)):
        return [to_py_obj(_lowerCamelCase) for o in obj]
    elif is_tf_tensor(_lowerCamelCase):
        return obj.numpy().tolist()
    elif is_torch_tensor(_lowerCamelCase):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(_lowerCamelCase):
        return np.asarray(_lowerCamelCase).tolist()
    elif isinstance(_lowerCamelCase, (np.ndarray, np.number)):
        # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def A(_lowerCamelCase):
    '''simple docstring'''
    if isinstance(_lowerCamelCase, (dict, UserDict)):
        return {k: to_numpy(_lowerCamelCase) for k, v in obj.items()}
    elif isinstance(_lowerCamelCase, (list, tuple)):
        return np.array(_lowerCamelCase)
    elif is_tf_tensor(_lowerCamelCase):
        return obj.numpy()
    elif is_torch_tensor(_lowerCamelCase):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(_lowerCamelCase):
        return np.asarray(_lowerCamelCase)
    else:
        return obj


class UpperCAmelCase_(a):
    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = fields(self)
        # Safety and consistency checks
        if not len(__a):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")
        _lowerCAmelCase : Dict = getattr(self, class_fields[0].name)
        _lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(__a):
            if isinstance(__a, __a):
                _lowerCAmelCase : Tuple = first_field.items()
                _lowerCAmelCase : Dict = True
            else:
                try:
                    _lowerCAmelCase : Dict = iter(__a)
                    _lowerCAmelCase : Any = True
                except TypeError:
                    _lowerCAmelCase : Any = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(__a):
                    if (
                        not isinstance(__a, (list, tuple))
                        or not len(__a) == 2
                        or not isinstance(element[0], __a)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            _lowerCAmelCase : Any = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        _lowerCAmelCase : Any = element[1]
        elif first_field is not None:
            _lowerCAmelCase : Any = first_field
        else:
            for field in class_fields:
                _lowerCAmelCase : Dict = getattr(self, field.name)
                if v is not None:
                    _lowerCAmelCase : Union[str, Any] = v

    def __delitem__(self, *__a, **__a):
        '''simple docstring'''
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def snake_case__(self, *__a, **__a):
        '''simple docstring'''
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def snake_case__(self, *__a, **__a):
        '''simple docstring'''
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def snake_case__(self, *__a, **__a):
        '''simple docstring'''
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, __a):
        '''simple docstring'''
        if isinstance(__a, __a):
            _lowerCAmelCase : Optional[int] = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, __a, __a):
        '''simple docstring'''
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(__a, __a)
        super().__setattr__(__a, __a)

    def __setitem__(self, __a, __a):
        '''simple docstring'''
        super().__setitem__(__a, __a)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(__a, __a)

    def snake_case__(self):
        '''simple docstring'''
        return tuple(self[k] for k in self.keys())


class UpperCAmelCase_(a, a):
    @classmethod
    def snake_case__(cls, __a):
        '''simple docstring'''
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}")


class UpperCAmelCase_(a):
    lowerCamelCase__ = 'longest'
    lowerCamelCase__ = 'max_length'
    lowerCamelCase__ = 'do_not_pad'


class UpperCAmelCase_(a):
    lowerCamelCase__ = 'pt'
    lowerCamelCase__ = 'tf'
    lowerCamelCase__ = 'np'
    lowerCamelCase__ = 'jax'


class UpperCAmelCase_:
    def __init__(self, __a):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = context_managers
        _lowerCAmelCase : Dict = ExitStack()

    def __enter__(self):
        '''simple docstring'''
        for context_manager in self.context_managers:
            self.stack.enter_context(__a)

    def __exit__(self, *__a, **__a):
        '''simple docstring'''
        self.stack.__exit__(*__a, **__a)


def A(_lowerCamelCase):
    '''simple docstring'''
    _lowerCAmelCase : str = infer_framework(_lowerCamelCase)
    if framework == "tf":
        _lowerCAmelCase : Tuple = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        _lowerCAmelCase : str = inspect.signature(model_class.forward)  # PyTorch models
    else:
        _lowerCAmelCase : Tuple = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def A(_lowerCamelCase):
    '''simple docstring'''
    _lowerCAmelCase : str = model_class.__name__
    _lowerCAmelCase : Optional[Any] = infer_framework(_lowerCamelCase)
    if framework == "tf":
        _lowerCAmelCase : Dict = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        _lowerCAmelCase : List[Any] = inspect.signature(model_class.forward)  # PyTorch models
    else:
        _lowerCAmelCase : Dict = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def A(_lowerCamelCase, _lowerCamelCase="", _lowerCamelCase="."):
    '''simple docstring'''

    def _flatten_dict(_lowerCamelCase, _lowerCamelCase="", _lowerCamelCase="."):
        for k, v in d.items():
            _lowerCAmelCase : Dict = str(_lowerCamelCase) + delimiter + str(_lowerCamelCase) if parent_key else k
            if v and isinstance(_lowerCamelCase, _lowerCamelCase):
                yield from flatten_dict(_lowerCamelCase, _lowerCamelCase, delimiter=_lowerCamelCase).items()
            else:
                yield key, v

    return dict(_flatten_dict(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase))


@contextmanager
def A(_lowerCamelCase, _lowerCamelCase=False):
    '''simple docstring'''
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def A(_lowerCamelCase, _lowerCamelCase=None):
    '''simple docstring'''
    if is_numpy_array(_lowerCamelCase):
        return np.transpose(_lowerCamelCase, axes=_lowerCamelCase)
    elif is_torch_tensor(_lowerCamelCase):
        return array.T if axes is None else array.permute(*_lowerCamelCase)
    elif is_tf_tensor(_lowerCamelCase):
        import tensorflow as tf

        return tf.transpose(_lowerCamelCase, perm=_lowerCamelCase)
    elif is_jax_tensor(_lowerCamelCase):
        return jnp.transpose(_lowerCamelCase, axes=_lowerCamelCase)
    else:
        raise ValueError(F"Type not supported for transpose: {type(_lowerCamelCase)}.")


def A(_lowerCamelCase, _lowerCamelCase):
    '''simple docstring'''
    if is_numpy_array(_lowerCamelCase):
        return np.reshape(_lowerCamelCase, _lowerCamelCase)
    elif is_torch_tensor(_lowerCamelCase):
        return array.reshape(*_lowerCamelCase)
    elif is_tf_tensor(_lowerCamelCase):
        import tensorflow as tf

        return tf.reshape(_lowerCamelCase, _lowerCamelCase)
    elif is_jax_tensor(_lowerCamelCase):
        return jnp.reshape(_lowerCamelCase, _lowerCamelCase)
    else:
        raise ValueError(F"Type not supported for reshape: {type(_lowerCamelCase)}.")


def A(_lowerCamelCase, _lowerCamelCase=None):
    '''simple docstring'''
    if is_numpy_array(_lowerCamelCase):
        return np.squeeze(_lowerCamelCase, axis=_lowerCamelCase)
    elif is_torch_tensor(_lowerCamelCase):
        return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase)
    elif is_tf_tensor(_lowerCamelCase):
        import tensorflow as tf

        return tf.squeeze(_lowerCamelCase, axis=_lowerCamelCase)
    elif is_jax_tensor(_lowerCamelCase):
        return jnp.squeeze(_lowerCamelCase, axis=_lowerCamelCase)
    else:
        raise ValueError(F"Type not supported for squeeze: {type(_lowerCamelCase)}.")


def A(_lowerCamelCase, _lowerCamelCase):
    '''simple docstring'''
    if is_numpy_array(_lowerCamelCase):
        return np.expand_dims(_lowerCamelCase, _lowerCamelCase)
    elif is_torch_tensor(_lowerCamelCase):
        return array.unsqueeze(dim=_lowerCamelCase)
    elif is_tf_tensor(_lowerCamelCase):
        import tensorflow as tf

        return tf.expand_dims(_lowerCamelCase, axis=_lowerCamelCase)
    elif is_jax_tensor(_lowerCamelCase):
        return jnp.expand_dims(_lowerCamelCase, axis=_lowerCamelCase)
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase)}.")


def A(_lowerCamelCase):
    '''simple docstring'''
    if is_numpy_array(_lowerCamelCase):
        return np.size(_lowerCamelCase)
    elif is_torch_tensor(_lowerCamelCase):
        return array.numel()
    elif is_tf_tensor(_lowerCamelCase):
        import tensorflow as tf

        return tf.size(_lowerCamelCase)
    elif is_jax_tensor(_lowerCamelCase):
        return array.size
    else:
        raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase)}.")


def A(_lowerCamelCase, _lowerCamelCase):
    '''simple docstring'''
    for key, value in auto_map.items():
        if isinstance(_lowerCamelCase, (tuple, list)):
            _lowerCAmelCase : List[Any] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            _lowerCAmelCase : Tuple = F"{repo_id}--{value}"
    return auto_map


def A(_lowerCamelCase):
    '''simple docstring'''
    for base_class in inspect.getmro(_lowerCamelCase):
        _lowerCAmelCase : Tuple = base_class.__module__
        _lowerCAmelCase : int = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
        else:
            raise TypeError(F"Could not infer framework from class {model_class}.")
```

label: 0
Row 3

code (code_codestyle: 128):

```python
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


@skip_mps
class _lowercase(a_, unittest.TestCase):
    '''simple docstring'''
    lowercase__ = TextToVideoSDPipeline
    lowercase__ = TEXT_TO_IMAGE_PARAMS
    lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    lowercase__ = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ]
    )

    def _lowerCamelCase(self):
        '''simple docstring'''
        torch.manual_seed(0)
        UpperCamelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        UpperCamelCase_ = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=__a,
            set_alpha_to_one=__a,
        )
        torch.manual_seed(0)
        UpperCamelCase_ = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        UpperCamelCase_ = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        UpperCamelCase_ = CLIPTextModel(__a)
        UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        UpperCamelCase_ = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def _lowerCamelCase(self, snake_case__, snake_case__=0):
        '''simple docstring'''
        if str(__a).startswith("mps"):
            UpperCamelCase_ = torch.manual_seed(__a)
        else:
            UpperCamelCase_ = torch.Generator(device=__a).manual_seed(__a)
        UpperCamelCase_ = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def _lowerCamelCase(self):
        '''simple docstring'''
        UpperCamelCase_ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase_ = self.get_dummy_components()
        UpperCamelCase_ = TextToVideoSDPipeline(**__a)
        UpperCamelCase_ = sd_pipe.to(__a)
        sd_pipe.set_progress_bar_config(disable=__a)
        UpperCamelCase_ = self.get_dummy_inputs(__a)
        UpperCamelCase_ = "np"
        UpperCamelCase_ = sd_pipe(**__a).frames
        UpperCamelCase_ = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        UpperCamelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def _lowerCamelCase(self):
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def _lowerCamelCase(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def _lowerCamelCase(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def _lowerCamelCase(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def _lowerCamelCase(self):
        '''simple docstring'''
        pass

    def _lowerCamelCase(self):
        '''simple docstring'''
        return super().test_progress_bar()


@slow
@skip_mps
class _lowercase(unittest.TestCase):
    '''simple docstring'''

    def _lowerCamelCase(self):
        '''simple docstring'''
        UpperCamelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        UpperCamelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        UpperCamelCase_ = pipe.to("cuda")
        UpperCamelCase_ = "Spiderman is surfing"
        UpperCamelCase_ = torch.Generator(device="cpu").manual_seed(0)
        UpperCamelCase_ = pipe(__a, generator=__a, num_inference_steps=25, output_type="pt").frames
        UpperCamelCase_ = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def _lowerCamelCase(self):
        '''simple docstring'''
        UpperCamelCase_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        UpperCamelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        UpperCamelCase_ = pipe.to("cuda")
        UpperCamelCase_ = "Spiderman is surfing"
        UpperCamelCase_ = torch.Generator(device="cpu").manual_seed(0)
        UpperCamelCase_ = pipe(__a, generator=__a, num_inference_steps=2, output_type="pt").frames
        UpperCamelCase_ = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
```

code_codestyle: 128
style_context (style_context_codestyle: 36):

```python
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(_lowerCamelCase, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def A(_lowerCamelCase, _lowerCamelCase):
    '''simple docstring'''
    _lowerCAmelCase : List[Any] = _distribute_shards(**_lowerCamelCase)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def A(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase):
    '''simple docstring'''
    _lowerCAmelCase : Optional[int] = _split_gen_kwargs(_lowerCamelCase, _lowerCamelCase)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def A(_lowerCamelCase, _lowerCamelCase):
    '''simple docstring'''
    if expected is RuntimeError:
        with pytest.raises(_lowerCamelCase):
            _number_of_shards_in_gen_kwargs(_lowerCamelCase)
    else:
        _lowerCAmelCase : Optional[int] = _number_of_shards_in_gen_kwargs(_lowerCamelCase)
        assert out == expected
```

label: 0
Row 4

code (code_codestyle: 20):

```python
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def _snake_case(SCREAMING_SNAKE_CASE__) -> List[str]:
    return np.dot(_lowerCamelCase, _lowerCamelCase)


class __snake_case:
    def __init__(self, *, snake_case=np.inf, snake_case="linear", snake_case=0.0):
        '''simple docstring'''
        lowercase : str = regularization
        lowercase : Union[str, Any] = gamma
        if kernel == "linear":
            lowercase : Any = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("""rbf kernel requires gamma""")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("""gamma must be float or int""")
            if not self.gamma > 0:
                raise ValueError("""gamma must be > 0""")
            lowercase : Optional[Any] = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            lowercase : Union[str, Any] = f"Unknown kernel: {kernel}"
            raise ValueError(__a)

    def _SCREAMING_SNAKE_CASE(self, snake_case, snake_case):
        '''simple docstring'''
        return np.dot(__a, __a)

    def _SCREAMING_SNAKE_CASE(self, snake_case, snake_case):
        '''simple docstring'''
        return np.exp(-(self.gamma * norm_squared(vectora - vectora)))

    def _SCREAMING_SNAKE_CASE(self, snake_case, snake_case):
        '''simple docstring'''
        lowercase : Optional[Any] = observations
        lowercase : Optional[int] = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (lowercase) : str = np.shape(__a)

        def to_minimize(snake_case) -> float:
            lowercase : Tuple = 0
            (lowercase) : Tuple = np.shape(__a)
            for i in range(__a):
                for j in range(__a):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(__a)

        lowercase : str = LinearConstraint(__a, 0, 0)
        lowercase : int = Bounds(0, self.regularization)
        lowercase : int = minimize(
            __a, np.ones(__a), bounds=__a, constraints=[ly_contraint]).x
        lowercase : Any = l_star

        # calculating mean offset of separation plane to points
        lowercase : int = 0
        for i in range(__a):
            for j in range(__a):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        lowercase : Any = s / n

    def _SCREAMING_SNAKE_CASE(self, snake_case):
        '''simple docstring'''
        lowercase : Optional[Any] = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], __a)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

code_codestyle: 20
style_context (style_context_codestyle: 36):

```python
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn

from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class UpperCAmelCase_:
    def __init__(self, __a="cpu", __a="openai/clip-vit-large-patch14"):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = device
        _lowerCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(__a)
        _lowerCAmelCase : Any = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        _lowerCAmelCase : Union[str, Any] = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        _lowerCAmelCase : Tuple = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        _lowerCAmelCase : Optional[int] = torchvision.transforms.Resize(224)
        _lowerCAmelCase : Dict = torchvision.transforms.CenterCrop(224)

    def snake_case__(self, __a):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = self.resize(__a)
        _lowerCAmelCase : List[str] = self.center_crop(__a)
        _lowerCAmelCase : Optional[Any] = self.normalize(__a)
        return images

    def __call__(self, __a=None, __a=None, **__a):
        '''simple docstring'''
        _lowerCAmelCase : str = self.tokenizer(text=__a, **__a)
        _lowerCAmelCase : List[str] = self.preprocess_img(__a)
        _lowerCAmelCase : Tuple = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class UpperCAmelCase_(nn.Module):
    def __init__(self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None,
                 __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False):
        '''simple docstring'''
        super().__init__()
        _lowerCAmelCase : List[str] = None
        _lowerCAmelCase : List[str] = device if device else get_device()
        if vqgan:
            _lowerCAmelCase : Union[str, Any] = vqgan
        else:
            _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a)
        self.vqgan.eval()
        if clip:
            _lowerCAmelCase : str = clip
        else:
            _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device)
        _lowerCAmelCase : Any = iterations
        _lowerCAmelCase : List[Any] = lr
        _lowerCAmelCase : Tuple = log
        _lowerCAmelCase : List[str] = make_grid
        _lowerCAmelCase : int = return_val
        _lowerCAmelCase : Dict = quantize
        _lowerCAmelCase : Any = self.vqgan.decoder.z_shape

    def snake_case__(self, __a=None, __a=None, __a=5, __a=True):
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = []
        if output_path is None:
            _lowerCAmelCase : List[Any] = "./animation.gif"
        if input_path is None:
            _lowerCAmelCase : str = self.save_path
        _lowerCAmelCase : str = sorted(glob(input_path + "/*"))
        if not len(__a):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(__a) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        _lowerCAmelCase : Optional[int] = total_duration / len(__a)
        _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a)
        if extend_frames:
            _lowerCAmelCase : Any = 1.5
            _lowerCAmelCase : List[str] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(__a))
        imageio.mimsave(__a, __a, duration=__a)
        print(f"gif saved to {output_path}")

    def snake_case__(self, __a=None, __a=None):
        '''simple docstring'''
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device)
        _lowerCAmelCase : Dict = preprocess_vqgan(__a)
        _lowerCAmelCase, *_lowerCAmelCase : str = self.vqgan.encode(__a)
        return z

    def snake_case__(self, __a):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_()
        _lowerCAmelCase : Dict = base_latent + transform_vector
        if self.quantize:
            _lowerCAmelCase, *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a)
        else:
            _lowerCAmelCase : Any = trans_latent
        return self.vqgan.decode(__a)

    def snake_case__(self, __a, __a, __a=None):
        '''simple docstring'''
        _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a)
        _lowerCAmelCase : Optional[int] = self.clip(**__a)
        _lowerCAmelCase : Any = clip_outputs.logits_per_image
        if weights is not None:
            _lowerCAmelCase : Tuple = similarity_logits * weights
        return similarity_logits.sum()

    def snake_case__(self, __a, __a, __a):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"])
        else:
            _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device)
        _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a)
        return loss

    def snake_case__(self, __a, __a, __a):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device)
        _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            _lowerCAmelCase : Any = self._add_vector(__a)
            _lowerCAmelCase : Optional[Any] = loop_post_process(__a)
            _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a)
            print("CLIP loss", __a)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=__a)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def snake_case__(self, __a, __a, __a):
        '''simple docstring'''
        wandb.init(reinit=__a, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            _lowerCAmelCase : str = Image.open(__a)
            _lowerCAmelCase : int = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(__a))

    def snake_case__(self, __a):
        '''simple docstring'''
        if not prompts:
            return []
        _lowerCAmelCase : int = []
        _lowerCAmelCase : List[str] = []
        if isinstance(__a, __a):
            _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(__a, (tuple, list)):
                _lowerCAmelCase : Optional[Any] = prompt[0]
                _lowerCAmelCase : Union[str, Any] = float(prompt[1])
            elif ":" in prompt:
                _lowerCAmelCase, _lowerCAmelCase : int = prompt.split(":")
                _lowerCAmelCase : Optional[Any] = float(__a)
            else:
                _lowerCAmelCase : Optional[int] = prompt
                _lowerCAmelCase : List[Any] = 1.0
            processed_prompts.append(__a)
            weights.append(__a)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(__a, device=self.device),
        }

    def snake_case__(self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None):
        '''simple docstring'''
        if image_path:
            _lowerCAmelCase : List[Any] = self._get_latent(__a)
        else:
            _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(__a, __a, __a)
        assert pos_prompts, "You must provide at least one positive prompt."
        _lowerCAmelCase : int = self.process_prompts(__a)
        _lowerCAmelCase : List[str] = self.process_prompts(__a)
        if save_final and save_path is None:
            _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(__a):
                os.makedirs(__a)
            else:
                _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp()
                os.makedirs(__a)
            _lowerCAmelCase : Tuple = save_path
        _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(__a))
        _lowerCAmelCase : int = loop_post_process(__a)
        for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)):
            if show_intermediate:
                show_pil(__a)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(__a)})
        if show_final:
            show_pil(__a)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
```

label: 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available a__ : int = { '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ '''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoForCausalLM''', '''GPTNeoForQuestionAnswering''', '''GPTNeoForSequenceClassification''', '''GPTNeoForTokenClassification''', '''GPTNeoModel''', '''GPTNeoPreTrainedModel''', '''load_tf_weights_in_gpt_neo''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[Any] = [ '''FlaxGPTNeoForCausalLM''', '''FlaxGPTNeoModel''', '''FlaxGPTNeoPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys a__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
54
style_context (style_context_codestyle: 36):

```python
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test

sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402

_snake_case = get_tests_dir("fixtures")


class UpperCAmelCase_(unittest.TestCase):
    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = mock.Mock()
        _lowerCAmelCase : int = 500
        _lowerCAmelCase : Tuple = {}
        _lowerCAmelCase : str = HTTPError
        _lowerCAmelCase : Union[str, Any] = {}
        # Download this model to make sure it's in the cache.
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=__a) as mock_head:
            _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")

    def snake_case__(self):
        '''simple docstring'''
        with self.assertRaises(__a):
            # config is in subfolder, the following should not work without specifying the subfolder
            _lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        _lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(__a)


@is_staging_test
class UpperCAmelCase_(unittest.TestCase):
    @classmethod
    def snake_case__(cls):
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = TOKEN
        HfFolder.save_token(__a)

    @classmethod
    def snake_case__(cls):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        _lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token)
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token)
        _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__a, getattr(__a, __a))

    def snake_case__(self):
        '''simple docstring'''
        CustomImageProcessor.register_for_auto_class()
        _lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )
        _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=__a)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
```

label: 0
"""simple docstring""" import math def __lowerCamelCase ( __UpperCamelCase ) -> Dict: """simple docstring""" lowerCAmelCase_ : Optional[Any] = [True] * n lowerCAmelCase_ : Union[str, Any] = False lowerCAmelCase_ : Optional[Any] = False lowerCAmelCase_ : Tuple = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): lowerCAmelCase_ : str = i * 2 while index < n: lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[int] = index + i lowerCAmelCase_ : Any = [2] for i in range(3 , _lowerCamelCase , 2 ): if is_prime[i]: primes.append(_lowerCamelCase ) return primes def __lowerCamelCase ( __UpperCamelCase = 999966663333 ) -> int: """simple docstring""" lowerCAmelCase_ : Optional[int] = math.floor(math.sqrt(_lowerCamelCase ) ) + 100 lowerCAmelCase_ : str = prime_sieve(_lowerCamelCase ) lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : List[str] = 0 lowerCAmelCase_ : List[Any] = primes[prime_index] while (last_prime**2) <= limit: lowerCAmelCase_ : Tuple = primes[prime_index + 1] lowerCAmelCase_ : List[Any] = last_prime**2 lowerCAmelCase_ : Dict = next_prime**2 # Get numbers divisible by lps(current) lowerCAmelCase_ : List[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) lowerCAmelCase_ : List[Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps lowerCAmelCase_ : Optional[int] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair lowerCAmelCase_ : Optional[int] = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
241
style_context (style_context_codestyle and label truncated in this preview):

```python
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class UpperCAmelCase_:
    def __init__(self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=24,
                 __a=2, __a=6, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02,
                 __a=3, __a=None, __a=1000):
        '''simple docstring'''
        _lowerCAmelCase : Tuple = parent
        _lowerCAmelCase : List[str] = batch_size
        _lowerCAmelCase : int = seq_length
        _lowerCAmelCase : Optional[int] = is_training
        _lowerCAmelCase : Dict = use_input_mask
        _lowerCAmelCase : List[str] = use_token_type_ids
        _lowerCAmelCase : str = use_labels
        _lowerCAmelCase : Optional[Any] = vocab_size
        _lowerCAmelCase : Tuple = hidden_size
        _lowerCAmelCase : List[Any] = num_hidden_layers
        _lowerCAmelCase : Optional[Any] = num_attention_heads
        _lowerCAmelCase : Any = intermediate_size
        _lowerCAmelCase : List[str] = hidden_act
        _lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
        _lowerCAmelCase : Any = attention_probs_dropout_prob
        _lowerCAmelCase : int = max_position_embeddings
        _lowerCAmelCase : Optional[int] = type_vocab_size
        _lowerCAmelCase : Optional[Any] = type_sequence_label_size
        _lowerCAmelCase : List[str] = initializer_range
        _lowerCAmelCase : List[Any] = num_labels
        _lowerCAmelCase : Tuple = scope
        _lowerCAmelCase : str = range_bbox

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _lowerCAmelCase : Dict = bbox[i, j, 3]
                    _lowerCAmelCase : int = bbox[i, j, 1]
                    _lowerCAmelCase : Tuple = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _lowerCAmelCase : str = bbox[i, j, 2]
                    _lowerCAmelCase : List[Any] = bbox[i, j, 0]
                    _lowerCAmelCase : str = t
        _lowerCAmelCase : Optional[Any] = None
        if self.use_input_mask:
            _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        _lowerCAmelCase : Dict = None
        if self.use_token_type_ids:
            _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        _lowerCAmelCase : Optional[int] = None
        _lowerCAmelCase : Optional[Any] = None
        if self.use_labels:
            _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
            _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        _lowerCAmelCase : Optional[int] = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def snake_case__(self):
        '''simple docstring'''
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def snake_case__(self, __a, __a, __a, __a, __a, __a, __a):
        '''simple docstring'''
        _lowerCAmelCase : Union[str, Any] = LiltModel(config=__a)
        model.to(__a)
        model.eval()
        _lowerCAmelCase : Dict = model(__a, bbox=__a, attention_mask=__a, token_type_ids=__a)
        _lowerCAmelCase : str = model(__a, bbox=__a, token_type_ids=__a)
        _lowerCAmelCase : List[Any] = model(__a, bbox=__a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def snake_case__(self, __a, __a, __a, __a, __a, __a, __a):
        '''simple docstring'''
        _lowerCAmelCase : List[Any] = self.num_labels
        _lowerCAmelCase : Optional[Any] = LiltForTokenClassification(config=__a)
        model.to(__a)
        model.eval()
        _lowerCAmelCase : Dict = model(
            __a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def snake_case__(self, __a, __a, __a, __a, __a, __a, __a):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = LiltForQuestionAnswering(config=__a)
        model.to(__a)
        model.eval()
        _lowerCAmelCase : Tuple = model(
            __a,
            bbox=__a,
            attention_mask=__a,
            token_type_ids=__a,
            start_positions=__a,
            end_positions=__a,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        (
            (_lowerCAmelCase),
            (_lowerCAmelCase),
            (_lowerCAmelCase),
            (_lowerCAmelCase),
            (_lowerCAmelCase),
            (_lowerCAmelCase),
            (_lowerCAmelCase),
        ) : Dict = config_and_inputs
        _lowerCAmelCase : List[Any] = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class UpperCAmelCase_(a, a, a, unittest.TestCase):
    lowerCamelCase__ = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase__ = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase__ = False
    lowerCamelCase__ = False

    def snake_case__(self, __a, __a, __a, __a, __a):
        '''simple docstring'''
        return True

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Optional[Any] = LiltModelTester(self)
        _lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37)

    def snake_case__(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowerCAmelCase : Any = type
            self.model_tester.create_and_check_model(*__a)

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a)

    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__a)

    @slow
    def snake_case__(self):
        '''simple docstring'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCAmelCase : str = LiltModel.from_pretrained(__a)
            self.assertIsNotNone(__a)


@require_torch
@slow
class UpperCAmelCase_(unittest.TestCase):
    def snake_case__(self):
        '''simple docstring'''
        _lowerCAmelCase : Dict = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__a)
        _lowerCAmelCase : Any = torch.tensor([[1, 2]], device=__a)
        _lowerCAmelCase : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=__a)
        # forward pass
        with torch.no_grad():
            _lowerCAmelCase : Optional[Any] = model(input_ids=__a, bbox=__a)
        _lowerCAmelCase : Optional[int] = torch.Size([1, 2, 768])
        _lowerCAmelCase : List[str] = torch.tensor(
            [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]],
            device=__a,
        )
        self.assertTrue(outputs.last_hidden_state.shape, __a)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], __a, atol=1E-3))
```
36
0
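The sieve in the row above skips even indices for speed. As a sanity check, a minimal plain Sieve of Eratosthenes (an illustrative helper, not part of the row) should produce the same primes:

def simple_sieve(n: int) -> list[int]:
    # Straightforward Sieve of Eratosthenes for cross-checking the odd-only variant.
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(n**0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, n, i):
                is_prime[j] = False
    return [i for i, prime in enumerate(is_prime) if prime]


assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]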
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class a__ ( lowerCamelCase_ ): _SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer'] _SCREAMING_SNAKE_CASE : List[str] = 'ViltImageProcessor' _SCREAMING_SNAKE_CASE : Optional[Any] = ('BertTokenizer', 'BertTokenizerFast') def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ): """simple docstring""" _lowercase : int = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __a , ) _lowercase : int = kwargs.pop("feature_extractor" ) _lowercase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__a , __a ) _lowercase : int = self.image_processor def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ): """simple docstring""" _lowercase : Dict = self.tokenizer( text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , ) # add pixel_values + pixel_mask _lowercase : List[Any] = self.image_processor(__a , return_tensors=__a ) encoding.update(__a ) return encoding def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ): """simple docstring""" return self.tokenizer.batch_decode(*__a , **__a ) def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ): """simple docstring""" return self.tokenizer.decode(*__a , **__a ) @property def _lowerCamelCase ( self ): """simple docstring""" _lowercase : Dict = self.tokenizer.model_input_names _lowercase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _lowerCamelCase ( self ): """simple docstring""" warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , ) return self.image_processor_class @property def _lowerCamelCase ( self ): """simple docstring""" warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , ) return self.image_processor
250
import argparse import copy def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : int = {} with open(_lowerCamelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _lowerCAmelCase : Tuple = [] _list.append([line.split()[1], line.split()[2]] ) _lowerCAmelCase : Any = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _lowerCAmelCase : str = [] _list.append([line.split()[0], line.split()[2]] ) _lowerCAmelCase : Any = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' with open(_lowerCamelCase ) as f: _lowerCAmelCase : str = f.read(1 ) _lowerCAmelCase : str = start_node _lowerCAmelCase : List[str] = [] _lowerCAmelCase : Any = start_node _lowerCAmelCase : str = 0 while visiting not in first_solution: _lowerCAmelCase : Dict = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_lowerCamelCase ) and k[0] not in first_solution: _lowerCAmelCase : List[str] = k[1] _lowerCAmelCase : List[Any] = k[0] first_solution.append(_lowerCamelCase ) _lowerCAmelCase : Optional[int] = distance_of_first_solution + int(_lowerCamelCase ) _lowerCAmelCase : str = best_node first_solution.append(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 _lowerCAmelCase : Tuple = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Tuple = [] for n in solution[1:-1]: _lowerCAmelCase : Dict = solution.index(_lowerCamelCase ) for kn in solution[1:-1]: _lowerCAmelCase : Dict = solution.index(_lowerCamelCase ) if n == kn: continue _lowerCAmelCase : Optional[int] = copy.deepcopy(_lowerCamelCase ) _lowerCAmelCase : int = kn _lowerCAmelCase : Dict = n _lowerCAmelCase : Optional[int] = 0 for k in _tmp[:-1]: _lowerCAmelCase : str = _tmp[_tmp.index(_lowerCamelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: _lowerCAmelCase : Optional[Any] = distance + int(i[1] ) _tmp.append(_lowerCamelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) _lowerCAmelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _lowerCamelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[str] = 1 _lowerCAmelCase : int = first_solution _lowerCAmelCase : Tuple = [] _lowerCAmelCase : Tuple = distance_of_first_solution _lowerCAmelCase : Optional[int] = solution while count <= iters: _lowerCAmelCase : int = find_neighborhood(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Dict = neighborhood[index_of_best_solution] _lowerCAmelCase : int = len(_lowerCamelCase ) - 1 _lowerCAmelCase : Union[str, Any] = False while not found: _lowerCAmelCase : Tuple = 0 while i < len(_lowerCamelCase ): if best_solution[i] != solution[i]: _lowerCAmelCase : str = best_solution[i] _lowerCAmelCase : Tuple = solution[i] break _lowerCAmelCase : int = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, 
first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) _lowerCAmelCase : Optional[int] = True _lowerCAmelCase : Optional[Any] = best_solution[:-1] _lowerCAmelCase : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: _lowerCAmelCase : Union[str, Any] = cost _lowerCAmelCase : List[Any] = solution else: _lowerCAmelCase : Optional[Any] = index_of_best_solution + 1 _lowerCAmelCase : Optional[Any] = neighborhood[index_of_best_solution] if len(_lowerCamelCase ) >= size: tabu_list.pop(0 ) _lowerCAmelCase : int = count + 1 return best_solution_ever, best_cost def A ( _lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase : int = generate_neighbours(args.File ) _lowerCAmelCase , _lowerCAmelCase : List[str] = generate_first_solution( args.File , _lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Any = tabu_search( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
36
0
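A hedged usage sketch for the ViLT processor defined in the row above; the checkpoint name and the dummy image are assumptions for illustration, not taken from the row:

from PIL import Image
from transformers import ViltProcessor

# Assumed checkpoint; any ViLT checkpoint with a matching processor should work.
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.new("RGB", (384, 384))  # placeholder image
inputs = processor(images=image, text="How many cats are there?", return_tensors="pt")
# Typically: input_ids, token_type_ids, attention_mask, pixel_values, pixel_mask
print(sorted(inputs.keys()))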
"""simple docstring""" import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __lowerCamelCase = logging.getLogger(__name__) __lowerCamelCase = tf.data.AUTOTUNE def UpperCAmelCase ( ): """simple docstring""" A__ = argparse.ArgumentParser(description='Train a masked language model on TPU.' ) parser.add_argument( '--pretrained_model_config' , type=_lowerCamelCase , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , ) parser.add_argument( '--tokenizer' , type=_lowerCamelCase , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , ) parser.add_argument( '--per_replica_batch_size' , type=_lowerCamelCase , default=8 , help='Batch size per TPU core.' , ) parser.add_argument( '--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , ) parser.add_argument( '--tpu_name' , type=_lowerCamelCase , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , ) parser.add_argument( '--tpu_zone' , type=_lowerCamelCase , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , ) parser.add_argument( '--gcp_project' , type=_lowerCamelCase , help='Google cloud project name. Only used for non-Colab TPU nodes.' ) parser.add_argument( '--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , ) parser.add_argument( '--train_dataset' , type=_lowerCamelCase , help='Path to training dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' , ) parser.add_argument( '--shuffle_buffer_size' , type=_lowerCamelCase , default=2**18 , help='Size of the shuffle buffer (in samples)' , ) parser.add_argument( '--eval_dataset' , type=_lowerCamelCase , help='Path to evaluation dataset to load. If the path begins with `gs://`' ' then the dataset will be loaded from a Google Cloud Storage bucket.' , ) parser.add_argument( '--num_epochs' , type=_lowerCamelCase , default=1 , help='Number of epochs to train for.' , ) parser.add_argument( '--learning_rate' , type=_lowerCamelCase , default=1E-4 , help='Learning rate to use for training.' , ) parser.add_argument( '--weight_decay_rate' , type=_lowerCamelCase , default=1E-3 , help='Weight decay rate to use for training.' , ) parser.add_argument( '--max_length' , type=_lowerCamelCase , default=512 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , ) parser.add_argument( '--mlm_probability' , type=_lowerCamelCase , default=0.1_5 , help='Fraction of tokens to mask during training.' , ) parser.add_argument('--output_dir' , type=_lowerCamelCase , required=_lowerCamelCase , help='Path to save model checkpoints to.' ) parser.add_argument('--hub_model_id' , type=_lowerCamelCase , help='Model ID to upload to on the Hugging Face Hub.' 
) A__ = parser.parse_args() return args def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" try: if args.tpu_name: A__ = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: A__ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( 'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or ' '--gcp_project. When running on a TPU VM, use --tpu_name local.' ) tf.config.experimental_connect_to_cluster(_lowerCamelCase ) tf.tpu.experimental.initialize_tpu_system(_lowerCamelCase ) return tpu def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" A__ = 0 for file in file_list: A__ = file.split('/' )[-1] A__ = re.search(r'-\d+-(\d+)\.tfrecord' , _lowerCamelCase ).group(1 ) A__ = int(_lowerCamelCase ) num_samples += sample_count return num_samples def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ): """simple docstring""" A__ = count_samples(_lowerCamelCase ) A__ = tf.data.Dataset.from_tensor_slices(_lowerCamelCase ) if shuffle: A__ = dataset.shuffle(len(_lowerCamelCase ) ) A__ = tf.data.TFRecordDataset(_lowerCamelCase , num_parallel_reads=_lowerCamelCase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here A__ = dataset.apply(tf.data.experimental.assert_cardinality(_lowerCamelCase ) ) A__ = dataset.map(_lowerCamelCase , num_parallel_calls=_lowerCamelCase ) if shuffle: assert shuffle_buffer_size is not None A__ = dataset.shuffle(args.shuffle_buffer_size ) A__ = dataset.batch(_lowerCamelCase , drop_remainder=_lowerCamelCase ) A__ = dataset.map(_lowerCamelCase , num_parallel_calls=_lowerCamelCase ) A__ = dataset.prefetch(_lowerCamelCase ) return dataset def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if not args.no_tpu: A__ = initialize_tpu(_lowerCamelCase ) A__ = tf.distribute.TPUStrategy(_lowerCamelCase ) else: A__ = tf.distribute.OneDeviceStrategy(device='/gpu:0' ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' ) A__ = AutoTokenizer.from_pretrained(args.tokenizer ) A__ = AutoConfig.from_pretrained(args.pretrained_model_config ) A__ = tokenizer.vocab_size A__ = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) ) if not training_records: raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' ) A__ = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) ) if not eval_records: raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' ) A__ = count_samples(_lowerCamelCase ) A__ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) A__ = steps_per_epoch * args.num_epochs with strategy.scope(): A__ = TFAutoModelForMaskedLM.from_config(_lowerCamelCase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built A__ = create_optimizer( num_train_steps=_lowerCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). 
model.compile(optimizer=_lowerCamelCase , metrics=['accuracy'] ) def decode_fn(UpperCamelCase__ ): A__ = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_lowerCamelCase , _lowerCamelCase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. A__ = DataCollatorForLanguageModeling( tokenizer=_lowerCamelCase , mlm_probability=args.mlm_probability , mlm=_lowerCamelCase , return_tensors='tf' ) def mask_with_collator(UpperCamelCase__ ): # TF really needs an isin() function A__ = ( ~tf.cast(batch['attention_mask'] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) A__ = data_collator.tf_mask_tokens( batch['input_ids'] , vocab_size=len(_lowerCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_lowerCamelCase , ) return batch A__ = args.per_replica_batch_size * strategy.num_replicas_in_sync A__ = prepare_dataset( _lowerCamelCase , decode_fn=_lowerCamelCase , mask_fn=_lowerCamelCase , batch_size=_lowerCamelCase , shuffle=_lowerCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , ) A__ = prepare_dataset( _lowerCamelCase , decode_fn=_lowerCamelCase , mask_fn=_lowerCamelCase , batch_size=_lowerCamelCase , shuffle=_lowerCamelCase , ) A__ = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_lowerCamelCase ) ) model.fit( _lowerCamelCase , validation_data=_lowerCamelCase , epochs=args.num_epochs , callbacks=_lowerCamelCase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __lowerCamelCase = parse_args() main(args)
221
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin _snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class UpperCAmelCase_ ( a , unittest.TestCase): lowerCamelCase__ = BartphoTokenizer lowerCamelCase__ = False lowerCamelCase__ = True def snake_case__ ( self): '''simple docstring''' super().setUp() _lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"] _lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a)))) _lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"} _lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"]) with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp: for token in vocab_tokens: fp.write(f"{token} {vocab_tokens[token]}\n") _lowerCAmelCase : Optional[Any] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def snake_case__ ( self, **__a): '''simple docstring''' kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname, **__a) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = "This is a là test" _lowerCAmelCase : Optional[int] = "This is a<unk><unk> test" return input_text, output_text def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map) _lowerCAmelCase : List[Any] = "This is a là test" _lowerCAmelCase : str = "▁This ▁is ▁a ▁l à ▁t est".split() _lowerCAmelCase : str = tokenizer.tokenize(__a) self.assertListEqual(__a, __a) _lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token] _lowerCAmelCase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), __a)
36
0
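The TPU training script in the row above infers dataset cardinality from shard filenames. A self-contained sketch of that convention, with the naming pattern read off the script's regex:

import re


def count_samples(file_list: list[str]) -> int:
    # Shards are assumed to be named like "wiki-0-1024.tfrecord",
    # where the final number is the record count for that shard.
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)
    return num_samples


assert count_samples(["gs://bucket/wiki-0-1024.tfrecord", "wiki-1-512.tfrecord"]) == 1536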
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest remaining item to the end,
    then recurse on the unsorted prefix until a pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
119
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _snake_case = logging.get_logger(__name__) def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' def constraint_to_multiple_of(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=None ): _lowerCAmelCase : Tuple = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple if x < min_val: _lowerCAmelCase : List[str] = math.ceil(val / multiple ) * multiple return x _lowerCAmelCase : Union[str, Any] = (output_size, output_size) if isinstance(_lowerCamelCase , _lowerCamelCase ) else output_size _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = get_image_size(_lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Any = output_size # determine new height and width _lowerCAmelCase : List[Any] = output_height / input_height _lowerCAmelCase : Any = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowerCAmelCase : Union[str, Any] = scale_width else: # fit height _lowerCAmelCase : Union[str, Any] = scale_height _lowerCAmelCase : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCamelCase ) _lowerCAmelCase : Dict = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCamelCase ) return (new_height, new_width) class UpperCAmelCase_ ( a): lowerCamelCase__ = ['pixel_values'] def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = False, __a = 1, __a = True, __a = 1 / 255, __a = True, __a = None, __a = None, **__a, ): '''simple docstring''' super().__init__(**__a) _lowerCAmelCase : Any = size if size is not None else {"height": 384, "width": 384} _lowerCAmelCase : Optional[int] = get_size_dict(__a) _lowerCAmelCase : Optional[Any] = do_resize _lowerCAmelCase : Dict = size _lowerCAmelCase : Any = keep_aspect_ratio _lowerCAmelCase : str = ensure_multiple_of _lowerCAmelCase : str = resample _lowerCAmelCase : Dict = do_rescale _lowerCAmelCase : Optional[int] = rescale_factor _lowerCAmelCase : Dict = do_normalize _lowerCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self, __a, __a, __a = False, __a = 1, __a = PILImageResampling.BICUBIC, __a = None, **__a, ): '''simple docstring''' _lowerCAmelCase : List[Any] = get_size_dict(__a) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}") _lowerCAmelCase : List[Any] = get_resize_output_image_size( __a, output_size=(size["height"], size["width"]), keep_aspect_ratio=__a, multiple=__a, ) return resize(__a, size=__a, resample=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a = None, **__a, ): '''simple docstring''' return rescale(__a, scale=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ): '''simple docstring''' return normalize(__a, mean=__a, std=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ): '''simple docstring''' _lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase : List[Any] = size if size is not None else self.size _lowerCAmelCase : str = get_size_dict(__a) _lowerCAmelCase : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowerCAmelCase : Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowerCAmelCase : int = resample if resample is not None else self.resample _lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std _lowerCAmelCase : Optional[Any] = make_list_of_images(__a) if not valid_images(__a): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. 
_lowerCAmelCase : List[Any] = [to_numpy_array(__a) for image in images] if do_resize: _lowerCAmelCase : Any = [self.resize(image=__a, size=__a, resample=__a) for image in images] if do_rescale: _lowerCAmelCase : List[str] = [self.rescale(image=__a, scale=__a) for image in images] if do_normalize: _lowerCAmelCase : Dict = [self.normalize(image=__a, mean=__a, std=__a) for image in images] _lowerCAmelCase : List[str] = [to_channel_dimension_format(__a, __a) for image in images] _lowerCAmelCase : Optional[Any] = {"pixel_values": images} return BatchFeature(data=__a, tensor_type=__a) def snake_case__ ( self, __a, __a = None): '''simple docstring''' _lowerCAmelCase : Optional[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__a) != len(__a): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits") if is_torch_tensor(__a): _lowerCAmelCase : List[Any] = target_sizes.numpy() _lowerCAmelCase : Dict = [] for idx in range(len(__a)): _lowerCAmelCase : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a) _lowerCAmelCase : int = resized_logits[0].argmax(dim=0) semantic_segmentation.append(__a) else: _lowerCAmelCase : Dict = logits.argmax(dim=1) _lowerCAmelCase : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
36
0
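The DPT-style image processor in the row above snaps resized dimensions to a configurable multiple (`ensure_multiple_of`). The rounding rule in isolation, as a quick arithmetic check (the `max_val` clamp from the row is omitted here):

import math


def constraint_to_multiple_of(val: float, multiple: int, min_val: int = 0) -> int:
    # Round to the nearest multiple, but never drop below min_val.
    x = round(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


# A 500 px edge snapped to a multiple of 32 lands on 512:
assert constraint_to_multiple_of(500, 32) == 512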
"""simple docstring""" from ...processing_utils import ProcessorMixin class UpperCamelCase_ (__A ): __magic_name__ = '''SpeechT5FeatureExtractor''' __magic_name__ = '''SpeechT5Tokenizer''' def __init__( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ) -> Tuple: super().__init__(__a , __a ) def __call__( self : int , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = kwargs.pop("audio" , __a ) UpperCAmelCase_ : Dict = kwargs.pop("text" , __a ) UpperCAmelCase_ : Dict = kwargs.pop("text_target" , __a ) UpperCAmelCase_ : Union[str, Any] = kwargs.pop("audio_target" , __a ) UpperCAmelCase_ : Any = kwargs.pop("sampling_rate" , __a ) if audio is not None and text is not None: raise ValueError( "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" ) if audio_target is not None and text_target is not None: raise ValueError( "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." ) if audio is not None: UpperCAmelCase_ : Tuple = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a ) elif text is not None: UpperCAmelCase_ : List[Any] = self.tokenizer(__a , **__a ) else: UpperCAmelCase_ : Dict = None if audio_target is not None: UpperCAmelCase_ : Union[str, Any] = self.feature_extractor(audio_target=__a , *__a , sampling_rate=__a , **__a ) UpperCAmelCase_ : Optional[int] = targets["input_values"] elif text_target is not None: UpperCAmelCase_ : List[Any] = self.tokenizer(__a , **__a ) UpperCAmelCase_ : Union[str, Any] = targets["input_ids"] else: UpperCAmelCase_ : Union[str, Any] = None if inputs is None: return targets if targets is not None: UpperCAmelCase_ : Any = labels UpperCAmelCase_ : List[Any] = targets.get("attention_mask" ) if decoder_attention_mask is not None: UpperCAmelCase_ : Tuple = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Dict ) -> Optional[int]: UpperCAmelCase_ : List[str] = kwargs.pop("input_values" , __a ) UpperCAmelCase_ : int = kwargs.pop("input_ids" , __a ) UpperCAmelCase_ : List[Any] = kwargs.pop("labels" , __a ) if input_values is not None and input_ids is not None: raise ValueError("Cannot process both `input_values` and `input_ids` inputs." ) if input_values is None and input_ids is None and labels is None: raise ValueError( "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." 
) if input_values is not None: UpperCAmelCase_ : List[str] = self.feature_extractor.pad(__a , *__a , **__a ) elif input_ids is not None: UpperCAmelCase_ : Optional[Any] = self.tokenizer.pad(__a , **__a ) else: UpperCAmelCase_ : List[Any] = None if labels is not None: if "input_ids" in labels or (isinstance(__a , __a ) and "input_ids" in labels[0]): UpperCAmelCase_ : str = self.tokenizer.pad(__a , **__a ) UpperCAmelCase_ : str = targets["input_ids"] else: UpperCAmelCase_ : Union[str, Any] = self.feature_extractor.feature_size UpperCAmelCase_ : str = self.feature_extractor.num_mel_bins UpperCAmelCase_ : str = self.feature_extractor.pad(__a , *__a , **__a ) UpperCAmelCase_ : List[Any] = feature_size_hack UpperCAmelCase_ : str = targets["input_values"] else: UpperCAmelCase_ : Optional[Any] = None if inputs is None: return targets if targets is not None: UpperCAmelCase_ : str = labels UpperCAmelCase_ : List[str] = targets.get("attention_mask" ) if decoder_attention_mask is not None: UpperCAmelCase_ : Any = decoder_attention_mask return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[int] ) -> str: return self.tokenizer.batch_decode(*__a , **__a ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Union[str, Any] ) -> int: return self.tokenizer.decode(*__a , **__a )
268
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[Any] = "huggingface/label-files" _lowerCAmelCase : int = "imagenet-1k-id2label.json" _lowerCAmelCase : Tuple = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCAmelCase : Tuple = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} _lowerCAmelCase : Tuple = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" _lowerCAmelCase : Optional[int] = BitConfig( conv_layer=_lowerCamelCase , num_labels=1_000 , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase , ) return config def A ( _lowerCamelCase ): '''simple docstring''' if "stem.conv" in name: _lowerCAmelCase : List[str] = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: _lowerCAmelCase : Any = name.replace("blocks" , "layers" ) if "head.fc" in name: _lowerCAmelCase : Optional[Any] = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): _lowerCAmelCase : Any = "bit." + name if "bit" not in name and "classifier" not in name: _lowerCAmelCase : Dict = "bit.encoder." 
+ name return name def A ( ): '''simple docstring''' _lowerCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCAmelCase : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ): '''simple docstring''' _lowerCAmelCase : Dict = get_config(_lowerCamelCase ) # load original model from timm _lowerCAmelCase : int = create_model(_lowerCamelCase , pretrained=_lowerCamelCase ) timm_model.eval() # load state_dict of original model _lowerCAmelCase : Any = timm_model.state_dict() for key in state_dict.copy().keys(): _lowerCAmelCase : Dict = state_dict.pop(_lowerCamelCase ) _lowerCAmelCase : Tuple = val.squeeze() if "head" in key else val # load HuggingFace model _lowerCAmelCase : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.eval() model.load_state_dict(_lowerCamelCase ) # create image processor _lowerCAmelCase : Dict = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) ) _lowerCAmelCase : Optional[int] = transform.transforms _lowerCAmelCase : Tuple = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _lowerCAmelCase : Tuple = BitImageProcessor( do_resize=_lowerCamelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : Any = transform(_lowerCamelCase ).unsqueeze(0 ) _lowerCAmelCase : Optional[int] = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) # verify logits with torch.no_grad(): _lowerCAmelCase : Tuple = model(_lowerCamelCase ) _lowerCAmelCase : str = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) _lowerCAmelCase : Union[str, Any] = timm_model(_lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print(F"Pushing model {model_name} and processor to the hub" ) model.push_to_hub(F"ybelkada/{model_name}" ) processor.push_to_hub(F"ybelkada/{model_name}" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) _snake_case = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
36
0
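A self-contained sketch of the timm-to-HF key renaming in the BiT conversion script above; the rules mirror the row's `rename_key`, so any divergence is on this sketch:

def rename_key(name: str) -> str:
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_key("stages.0.blocks.0.conv1.weight") == "bit.encoder.stages.0.layers.0.conv1.weight"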
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
90
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _snake_case = logging.get_logger(__name__) _snake_case = { "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCAmelCase_ ( a , a): lowerCamelCase__ = 'swin' lowerCamelCase__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=[2, 2, 6, 2], __a=[3, 6, 12, 24], __a=7, __a=4.0, __a=True, __a=0.0, __a=0.0, __a=0.1, __a="gelu", __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ): '''simple docstring''' super().__init__(**__a) _lowerCAmelCase : Any = image_size _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : Tuple = num_channels _lowerCAmelCase : List[Any] = embed_dim _lowerCAmelCase : Tuple = depths _lowerCAmelCase : Optional[Any] = len(__a) _lowerCAmelCase : int = num_heads _lowerCAmelCase : int = window_size _lowerCAmelCase : int = mlp_ratio _lowerCAmelCase : List[Any] = qkv_bias _lowerCAmelCase : str = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : Any = drop_path_rate _lowerCAmelCase : int = hidden_act _lowerCAmelCase : Tuple = use_absolute_embeddings _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : Tuple = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : List[str] = int(embed_dim * 2 ** (len(__a) - 1)) _lowerCAmelCase : List[Any] = ["stem"] + [f"stage{idx}" for idx in range(1, len(__a) + 1)] _lowerCAmelCase , _lowerCAmelCase : Optional[int] = get_aligned_output_features_output_indices( out_features=__a, out_indices=__a, stage_names=self.stage_names) class UpperCAmelCase_ ( a): lowerCamelCase__ = version.parse('1.11') @property def snake_case__ ( self): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ]) @property def snake_case__ ( self): '''simple docstring''' return 1E-4
36
0
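The Swin config in the row above derives the final hidden size from the embedding dimension and the stage count. The arithmetic, spelled out with the row's defaults:

embed_dim = 96
depths = [2, 2, 6, 2]
# The channel dimension doubles after each of the len(depths) - 1 downsampling steps.
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768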
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowercase__ ( _UpperCAmelCase ): a_ ="""Salesforce/blip-image-captioning-base""" a_ =( """This is a tool that generates a description of an image. It takes an input named `image` which should be the """ """image to caption, and returns a text that contains the description in English.""" ) a_ ="""image_captioner""" a_ =AutoModelForVisionaSeq a_ =["""image"""] a_ =["""text"""] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase )-> List[Any]: '''simple docstring''' requires_backends(self , ["vision"] ) super().__init__(*__a , **__a ) def UpperCAmelCase ( self , __UpperCAmelCase )-> List[Any]: '''simple docstring''' return self.pre_processor(images=__a , return_tensors="pt" ) def UpperCAmelCase ( self , __UpperCAmelCase )-> Tuple: '''simple docstring''' return self.model.generate(**__a ) def UpperCAmelCase ( self , __UpperCAmelCase )-> int: '''simple docstring''' return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0].strip()
340
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
36
0
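A hedged sketch of what the captioning tool in the row above does under the hood, written against the plain BLIP API; the checkpoint name comes from the row, the rest is assumed typical usage:

from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

checkpoint = "Salesforce/blip-image-captioning-base"
processor = BlipProcessor.from_pretrained(checkpoint)
model = BlipForConditionalGeneration.from_pretrained(checkpoint)

inputs = processor(images=Image.new("RGB", (384, 384)), return_tensors="pt")  # placeholder image
caption_ids = model.generate(**inputs)
caption = processor.batch_decode(caption_ids, skip_special_tokens=True)[0].strip()
print(caption)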
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants snake_case__ : List[Any] = Mapping[str, np.ndarray] snake_case__ : List[str] = Mapping[str, Any] # Is a nested dict. snake_case__ : Tuple = 0.01 @dataclasses.dataclass(frozen=_lowerCamelCase ) class A_ : lowerCAmelCase__ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ = None # Chain corresponding to each parent lowerCAmelCase__ = None def _a ( lowerCamelCase: str ) -> Tuple: '''simple docstring''' __A = r"(\[[A-Z]+\]\n)" __A = [tag.strip() for tag in re.split(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0] __A = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) __A = ["N", "CA", "C"] __A = None __A = None __A = None for g in groups: if "[PRIMARY]" == g[0]: __A = g[1][0].strip() for i in range(len(_lowerCamelCase ) ): if seq[i] not in residue_constants.restypes: __A = "X" # FIXME: strings are immutable __A = np.array( [residue_constants.restype_order.get(_lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __A = [] for axis in range(3 ): tertiary.append(list(map(_lowerCamelCase , g[1][axis].split() ) ) ) __A = np.array(_lowerCamelCase ) __A = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(_lowerCamelCase ): __A = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __A = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) __A = np.zeros( ( len(_lowerCamelCase ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(_lowerCamelCase ): __A = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=_lowerCamelCase , atom_mask=_lowerCamelCase , aatype=_lowerCamelCase , residue_index=np.arange(len(_lowerCamelCase ) ) , b_factors=_lowerCamelCase , ) def _a ( lowerCamelCase: Tuple , lowerCamelCase: Dict = 0 ) -> Optional[int]: '''simple docstring''' __A = [] __A = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) __A = prot.parents __A = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __A = [p for i, p in zip(_lowerCamelCase , _lowerCamelCase ) if i == chain_id] if parents is None or len(_lowerCamelCase ) == 0: __A = ["N/A"] pdb_headers.append(F"""PARENT {' '.join(_lowerCamelCase )}""" ) return pdb_headers def _a ( lowerCamelCase: int , lowerCamelCase: int ) -> Any: '''simple docstring''' __A = [] __A = pdb_str.split('''\n''' ) 
__A = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) __A = 42 if prot.parents is not None and len(prot.parents ) > 0: __A = [] if prot.parents_chain_index is not None: __A = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(_lowerCamelCase ) , [] ) parent_dict[str(_lowerCamelCase )].append(_lowerCamelCase ) __A = max([int(_lowerCamelCase ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __A = parent_dict.get(str(_lowerCamelCase ) , ['''N/A'''] ) parents_per_chain.append(_lowerCamelCase ) else: parents_per_chain.append(list(prot.parents ) ) else: __A = [["N/A"]] def make_parent_line(lowerCamelCase: Any ) -> str: return F"""PARENT {' '.join(_lowerCamelCase )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __A = 0 for i, l in enumerate(_lowerCamelCase ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(_lowerCamelCase ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(_lowerCamelCase ): __A = parents_per_chain[chain_counter] else: __A = ["N/A"] out_pdb_lines.append(make_parent_line(_lowerCamelCase ) ) return "\n".join(_lowerCamelCase ) def _a ( lowerCamelCase: Union[str, Any] ) -> Dict: '''simple docstring''' __A = residue_constants.restypes + ["X"] def res_atoa(lowerCamelCase: str ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) __A = residue_constants.atom_types __A = [] __A = prot.atom_mask __A = prot.aatype __A = prot.atom_positions __A = prot.residue_index.astype(np.intaa ) __A = prot.b_factors __A = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) __A = get_pdb_headers(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: pdb_lines.extend(_lowerCamelCase ) __A = aatype.shape[0] __A = 1 __A = 0 __A = string.ascii_uppercase __A = None # Add all atom sites. for i in range(_lowerCamelCase ): __A = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(_lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __A = "ATOM" __A = atom_name if len(_lowerCamelCase ) == 4 else F""" {atom_name}""" __A = "" __A = "" __A = 1.00 __A = atom_name[0] # Protein supports only C, N, O, S, this works. __A = "" __A = "A" if chain_index is not None: __A = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! __A = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(_lowerCamelCase ) atom_index += 1 __A = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __A = True __A = chain_index[i + 1] if should_terminate: # Close the chain. __A = "TER" __A = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(_lowerCamelCase ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. 
pdb_lines.extend(get_pdb_headers(_lowerCamelCase , _lowerCamelCase ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(_lowerCamelCase ) def _a ( lowerCamelCase: int ) -> Optional[Any]: '''simple docstring''' return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _a ( lowerCamelCase: Dict , lowerCamelCase: Tuple , lowerCamelCase: int = None , lowerCamelCase: Optional[Any] = None , lowerCamelCase: Dict = None , lowerCamelCase: Optional[Any] = None , lowerCamelCase: int = None , ) -> List[Any]: '''simple docstring''' return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=_lowerCamelCase , remark=_lowerCamelCase , parents=_lowerCamelCase , parents_chain_index=_lowerCamelCase , )
117
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version _snake_case = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if got_ver is None or want_ver is None: raise ValueError( F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" F" reinstalling {pkg}." ) if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ): raise ImportError( F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def A ( _lowerCamelCase , _lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase : List[str] = F"\n{hint}" if hint is not None else "" # non-versioned check if re.match(r"^[\w_\-\d]+$" , _lowerCamelCase ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = requirement, None, None else: _lowerCAmelCase : Optional[int] = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , _lowerCamelCase ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" F" got {requirement}" ) _lowerCAmelCase , _lowerCAmelCase : Dict = match[0] _lowerCAmelCase : Any = want_full.split("," ) # there could be multiple requirements _lowerCAmelCase : Optional[int] = {} for w in want_range: _lowerCAmelCase : Any = re.findall(r"^([\s!=<>]{1,2})(.+)" , _lowerCamelCase ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," F" but got {requirement}" ) _lowerCAmelCase , _lowerCAmelCase : Tuple = match[0] _lowerCAmelCase : Union[str, Any] = want_ver if op not in ops: raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" ) # special case if pkg == "python": _lowerCAmelCase : Tuple = ".".join([str(_lowerCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return # check if any version is installed try: _lowerCAmelCase : Any = importlib.metadata.version(_lowerCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( F"The '{requirement}' distribution was not found and is required by this application. {hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[str] = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(_lowerCamelCase , _lowerCamelCase )
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole` using `with_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
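# Worked example: with the pole labels used in main() (from "A" to "B" via "C"),
# a tower of height 2 prints:
#     moving disk from A to C
#     moving disk from A to B
#     moving disk from C to B
# In general, moving a tower of height n takes 2**n - 1 disk moves.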
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Cleans one section of the doc table of content: removes duplicates and sorts by title."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicated entries
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
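# Worked example (hedged): duplicate entries are collapsed and the section is sorted by title, e.g.
#
#   clean_doc_toc([
#       {"local": "b", "title": "Beta"},
#       {"local": "a", "title": "Alpha"},
#       {"local": "b", "title": "Beta"},
#   ])
#   # -> [{"local": "a", "title": "Alpha"}, {"local": "b", "title": "Beta"}]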
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
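# Usage sketch (hedged): with the lazy module wired up, the public classes import as
# usual; the checkpoint names below are illustrative, not an endorsement of a pairing.
#
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "bert-base-uncased"
#   )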
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
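# Worked example (approximate figures, for illustration only): water at room
# temperature has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa, so
#   speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~= 1468 m/s,
# close to the commonly quoted ~1480 m/s.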
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Optional[int] = logging.get_logger(__name__) a__ : Dict = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Tuple = "wav2vec2" def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any]=3_2 , UpperCAmelCase__ : Dict=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : List[str]=1_2 , UpperCAmelCase__ : List[Any]=3_0_7_2 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[Any]=1E-5 , UpperCAmelCase__ : Optional[int]="group" , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase__ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase__ : str=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : List[str]=1_2_8 , UpperCAmelCase__ : Optional[int]=1_6 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=0.05 , UpperCAmelCase__ : Tuple=1_0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Union[str, Any]=1_0 , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : str=3_2_0 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=1_0_0 , UpperCAmelCase__ : str=2_5_6 , UpperCAmelCase__ : List[Any]=2_5_6 , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Dict="sum" , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : str=2_5_6 , UpperCAmelCase__ : List[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase__ : Any=(5, 3, 3, 1, 1) , UpperCAmelCase__ : Union[str, Any]=(1, 2, 3, 1, 1) , UpperCAmelCase__ : Tuple=5_1_2 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Dict , ) -> Optional[int]: super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a ) __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = feat_extract_norm __SCREAMING_SNAKE_CASE = feat_extract_activation __SCREAMING_SNAKE_CASE = list(__a ) __SCREAMING_SNAKE_CASE = list(__a ) __SCREAMING_SNAKE_CASE = list(__a ) __SCREAMING_SNAKE_CASE = conv_bias __SCREAMING_SNAKE_CASE = num_conv_pos_embeddings __SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups __SCREAMING_SNAKE_CASE = len(self.conv_dim ) __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = feat_proj_dropout __SCREAMING_SNAKE_CASE = final_dropout __SCREAMING_SNAKE_CASE = 
layerdrop __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = do_stable_layer_norm __SCREAMING_SNAKE_CASE = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __SCREAMING_SNAKE_CASE = apply_spec_augment __SCREAMING_SNAKE_CASE = mask_time_prob __SCREAMING_SNAKE_CASE = mask_time_length __SCREAMING_SNAKE_CASE = mask_time_min_masks __SCREAMING_SNAKE_CASE = mask_feature_prob __SCREAMING_SNAKE_CASE = mask_feature_length __SCREAMING_SNAKE_CASE = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __SCREAMING_SNAKE_CASE = num_codevectors_per_group __SCREAMING_SNAKE_CASE = num_codevector_groups __SCREAMING_SNAKE_CASE = contrastive_logits_temperature __SCREAMING_SNAKE_CASE = feat_quantizer_dropout __SCREAMING_SNAKE_CASE = num_negatives __SCREAMING_SNAKE_CASE = codevector_dim __SCREAMING_SNAKE_CASE = proj_codevector_dim __SCREAMING_SNAKE_CASE = diversity_loss_weight # ctc loss __SCREAMING_SNAKE_CASE = ctc_loss_reduction __SCREAMING_SNAKE_CASE = ctc_zero_infinity # adapter __SCREAMING_SNAKE_CASE = add_adapter __SCREAMING_SNAKE_CASE = adapter_kernel_size __SCREAMING_SNAKE_CASE = adapter_stride __SCREAMING_SNAKE_CASE = num_adapter_layers __SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size __SCREAMING_SNAKE_CASE = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. __SCREAMING_SNAKE_CASE = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __SCREAMING_SNAKE_CASE = list(__a ) __SCREAMING_SNAKE_CASE = list(__a ) __SCREAMING_SNAKE_CASE = list(__a ) __SCREAMING_SNAKE_CASE = xvector_output_dim @property def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
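# Usage sketch: the default conv strides (5, 2, 2, 2, 2, 2, 2) multiply to 320, so one
# logit frame covers 320 input samples (property name as restored above).
#
#   config = Wav2Vec2Config()
#   assert config.inputs_to_logits_ratio == 320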
from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class DummyDataset(Dataset):
        def __init__(self, length: int = 101):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i

    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}

    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
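# Usage sketch (hedged): the test classes above launch this very file under torchrun,
# roughly equivalent to
#   torchrun --nproc_per_node=2 --master_port=<free port> tests/test_trainer_distributed.py --output_dir /tmp/out
# Any ordering mismatch in evaluate()/predict() makes a worker call exit(1), which
# fails the wrapping execute_subprocess_async call.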
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging lowercase__ = logging.get_logger(__name__) def __lowerCamelCase ( __UpperCamelCase=None , __UpperCamelCase=None ) -> Dict: """simple docstring""" return field(default_factory=lambda: default , metadata=_lowerCamelCase ) @dataclass class __lowerCamelCase : '''simple docstring''' a_ : List[str] = list_field( default=[] , metadata={ """help""": ( """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version""" """ of all available models""" ) } , ) a_ : int = list_field( default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} ) a_ : int = list_field( default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , ) a_ : Any = field( default=A__ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , ) a_ : List[str] = field( default=A__ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , ) a_ : Union[str, Any] = field( default=A__ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} ) a_ : List[Any] = field(default=A__ , metadata={"""help""": """Use FP16 to accelerate inference."""} ) a_ : Tuple = field(default=A__ , metadata={"""help""": """Benchmark training of model"""} ) a_ : Optional[Any] = field(default=A__ , metadata={"""help""": """Verbose memory tracing"""} ) a_ : Tuple = field( default=A__ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , ) a_ : List[str] = field( default=A__ , metadata={ """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory""" } , ) a_ : int = field(default=A__ , metadata={"""help""": """Trace memory line by line"""} ) a_ : Union[str, Any] = field(default=A__ , metadata={"""help""": """Save result to a CSV file"""} ) a_ : Tuple = field(default=A__ , metadata={"""help""": """Save all print statements in a log file"""} ) a_ : Dict = field(default=A__ , metadata={"""help""": """Whether to print environment information"""} ) a_ : List[Any] = field( default=A__ , metadata={ """help""": ( """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use""" """ multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled""" """ for debugging / testing and on TPU.""" ) } , ) a_ : List[Any] = field( default=F"""inference_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , ) a_ : Tuple = field( default=F"""inference_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , ) a_ : List[str] = field( default=F"""train_time_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , ) a_ : List[Any] = field( default=F"""train_memory_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , ) a_ : Dict = field( default=F"""env_info_{round(time() )}.csv""" , metadata={"""help""": """CSV filename used if saving environment information."""} , ) a_ : Optional[Any] = field( default=F"""log_{round(time() )}.csv""" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , ) a_ : int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} ) a_ : Tuple = field( default=A__ , metadata={ """help""": ( """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain""" """ model weights.""" ) } , ) def lowerCamelCase ( self : Union[str, Any] ): warnings.warn( f'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils''' " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models." , __a , ) def lowerCamelCase ( self : Optional[int] ): return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def lowerCamelCase ( self : List[str] ): if len(self.models ) <= 0: raise ValueError( "Please make sure you provide at least one model name / model identifier, *e.g.* `--models" " bert-base-cased` or `args.models = ['bert-base-cased']." ) return self.models @property def lowerCamelCase ( self : Union[str, Any] ): if not self.multi_process: return False elif self.is_tpu: logger.info("Multiprocessing is currently not possible on TPU." ) return False else: return True
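# Usage sketch (hedged): the model name below is illustrative. Note that
# `do_multi_processing` relies on an `is_tpu` attribute supplied by framework-specific
# subclasses, so it is typically queried on those rather than on this base dataclass.
#
#   args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   print(args.model_names)  # ["bert-base-uncased"]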
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
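# Worked examples:
#   binary_search([0, 5, 7, 10, 15], 7)                     # -> 2
#   binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4)  # -> None
#   bisect_left([0, 5, 7, 10, 15], 7)                       # -> 2 (leftmost insertion point)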
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile

import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load


try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r


class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.upper()] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs


def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()


def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None == we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}")

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path


def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename


def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the given input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency (magnitude) response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
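if __name__ == "__main__":
    # Demo (hedged sketch): an identity "filter" satisfies the FilterType protocol and
    # should show a flat 0 dB magnitude response and zero phase shift; a real IIR/FIR
    # filter object plugs in the same way.
    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)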
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__( __A ): def snake_case__ ( self ) -> str: A__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a ,'hidden_sizes' ) ) self.parent.assertTrue(hasattr(__a ,'num_attention_heads' ) ) class UpperCamelCase__: def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=64 ,__UpperCAmelCase=3 ,__UpperCAmelCase=3 ,__UpperCAmelCase=2 ,__UpperCAmelCase=1 ,__UpperCAmelCase=16 ,__UpperCAmelCase=[1_28, 2_56, 3_84] ,__UpperCAmelCase=[4, 6, 8] ,__UpperCAmelCase=[2, 3, 4] ,__UpperCAmelCase=[16, 16, 16] ,__UpperCAmelCase=0 ,__UpperCAmelCase=[2, 2, 2] ,__UpperCAmelCase=[2, 2, 2] ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=2 ,) -> Union[str, Any]: A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = kernel_size A__ = stride A__ = padding A__ = hidden_sizes A__ = num_attention_heads A__ = depths A__ = key_dim A__ = drop_path_rate A__ = patch_size A__ = attention_ratio A__ = mlp_ratio A__ = initializer_range A__ = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] A__ = is_training A__ = use_labels A__ = num_labels A__ = initializer_range def snake_case__ ( self ) -> Tuple: A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] ,self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self ) -> int: return LevitConfig( image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,) def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]: A__ = LevitModel(config=__a ) model.to(__a ) model.eval() A__ = model(__a ) A__ = (self.image_size, self.image_size) A__ = image_size[0], image_size[1] for _ in range(4 ): A__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) A__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,) def snake_case__ ( 
self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: A__ = self.num_labels A__ = LevitForImageClassification(__a ) model.to(__a ) model.eval() A__ = model(__a ,labels=__a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def snake_case__ ( self ) -> Dict: A__ = self.prepare_config_and_inputs() A__ = config_and_inputs A__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase__( __A , __A , unittest.TestCase ): lowerCAmelCase__ : Any = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowerCAmelCase__ : List[str] = ( { 'feature-extraction': LevitModel, 'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCAmelCase__ : Optional[Any] = False lowerCAmelCase__ : Dict = False lowerCAmelCase__ : str = False lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[str] = False def snake_case__ ( self ) -> Any: A__ = LevitModelTester(self ) A__ = ConfigTester(self ,config_class=__a ,has_text_modality=__a ,hidden_size=37 ) def snake_case__ ( self ) -> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__ ( self ) -> Any: return @unittest.skip(reason='Levit does not use inputs_embeds' ) def snake_case__ ( self ) -> str: pass @unittest.skip(reason='Levit does not support input and output embeddings' ) def snake_case__ ( self ) -> List[str]: pass @unittest.skip(reason='Levit does not output attentions' ) def snake_case__ ( self ) -> Optional[int]: pass def snake_case__ ( self ) -> Any: A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__a ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["pixel_values"] self.assertListEqual(arg_names[:1] ,__a ) def snake_case__ ( self ) -> List[Any]: def check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ): A__ = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(__a ,__a ) ) A__ = outputs.hidden_states A__ = len(self.model_tester.depths ) + 1 self.assertEqual(len(__a ) ,__a ) A__ = (self.model_tester.image_size, self.model_tester.image_size) A__ = image_size[0], image_size[1] for _ in range(4 ): A__ = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) A__ = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[ height * width, self.model_tester.hidden_sizes[0], ] ,) A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(__a ,__a ,__a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(__a ,__a 
,__a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def snake_case__ ( self ) -> Optional[int]: pass def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Dict: A__ = super()._prepare_for_class(__a ,__a ,return_labels=__a ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def snake_case__ ( self ) -> Dict: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def snake_case__ ( self ) -> Dict: A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def snake_case__ ( self ) -> Optional[int]: if not self.model_tester.is_training: return A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__a ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue A__ = model_class(__a ) model.to(__a ) model.train() A__ = self._prepare_for_class(__a ,__a ,return_labels=__a ) A__ = model(**__a ).loss loss.backward() def snake_case__ ( self ) -> List[Any]: A__ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A__ = False A__ = True for model_class in self.all_model_classes: if model_class in get_values(__a ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue A__ = model_class(__a ) model.gradient_checkpointing_enable() model.to(__a ) model.train() A__ = self._prepare_for_class(__a ,__a ,return_labels=__a ) A__ = model(**__a ).loss loss.backward() def snake_case__ ( self ) -> Union[str, Any]: A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__a ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ): A__ = problem_type["title"] A__ = problem_type["num_labels"] A__ = model_class(__a ) model.to(__a ) model.train() A__ = self._prepare_for_class(__a ,__a ,return_labels=__a ) if problem_type["num_labels"] > 1: A__ = inputs["labels"].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] ) A__ = inputs["labels"].to(problem_type['dtype'] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__a ) as warning_list: A__ = model(**__a ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def snake_case__ ( self ) -> Tuple: for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = LevitModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def UpperCAmelCase ( ): """simple docstring""" A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase__( unittest.TestCase ): @cached_property def snake_case__ ( self ) -> List[str]: return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def snake_case__ ( self ) -> List[Any]: A__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __a ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=__a ,return_tensors='pt' ).to(__a ) # forward pass with torch.no_grad(): A__ = model(**__a ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,__a ) A__ = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__a ,atol=1e-4 ) )
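# Usage sketch (hedged): inside a transformers checkout these tests run under pytest, e.g.
#   python -m pytest tests/models/levit/test_modeling_levit.py -k "test_config"
# (path illustrative; the @slow integration test additionally requires RUN_SLOW=1)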
def gray_code(bit_count: int) -> list:
    """Takes in an integer n and returns the n-bit gray code sequence as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Generates the n-bit gray code sequence as strings of bits.

    The approach is recursive: build the n-bit sequence from the (n-1)-bit
    sequence by prefixing "0" to its first half and "1" to the reversed second half.
    """
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
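# Worked example: for 2 bits the string sequence is ["00", "01", "11", "10"],
# so gray_code(2) == [0, 1, 3, 2]; consecutive entries differ in exactly one bit.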
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ : def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=[10, 20, 30, 40], SCREAMING_SNAKE_CASE_=[1, 1, 2, 1], SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="relu", SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=None, ) -> Any: UpperCamelCase : Optional[Any] = parent UpperCamelCase : Optional[Any] = batch_size UpperCamelCase : Optional[Any] = image_size UpperCamelCase : str = num_channels UpperCamelCase : List[Any] = embeddings_size UpperCamelCase : Dict = hidden_sizes UpperCamelCase : Tuple = depths UpperCamelCase : Any = is_training UpperCamelCase : Tuple = use_labels UpperCamelCase : Union[str, Any] = hidden_act UpperCamelCase : List[Any] = num_labels UpperCamelCase : Any = scope UpperCamelCase : int = len(__a ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase : Tuple = None if self.use_labels: UpperCamelCase : Tuple = ids_tensor([self.batch_size], self.num_labels ) UpperCamelCase : int = self.get_config() return config, pixel_values, labels def snake_case_ ( self ) -> Optional[int]: return RegNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any: UpperCamelCase : Union[str, Any] = TFRegNetModel(config=__a ) UpperCamelCase : Any = model(__a, training=__a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCamelCase : Dict = self.num_labels UpperCamelCase : Optional[Any] = TFRegNetForImageClassification(__a ) UpperCamelCase : Optional[Any] = model(__a, labels=__a, training=__a ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def snake_case_ ( self ) -> str: UpperCamelCase : List[Any] = self.prepare_config_and_inputs() UpperCamelCase : Optional[Any] = config_and_inputs UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : Dict = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () UpperCAmelCase__ : str = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if 
is_tf_available() else {} ) UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : List[str] = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : Any = False def snake_case_ ( self ) -> Dict: UpperCamelCase : List[str] = TFRegNetModelTester(self ) UpperCamelCase : Optional[Any] = ConfigTester(self, config_class=__a, has_text_modality=__a ) def snake_case_ ( self ) -> Optional[Any]: return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def snake_case_ ( self ) -> Dict: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0, reason='TF does not support backprop for grouped convolutions on CPU.', ) @slow def snake_case_ ( self ) -> Any: super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def snake_case_ ( self ) -> Union[str, Any]: pass def snake_case_ ( self ) -> List[str]: UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase : List[Any] = model_class(__a ) UpperCamelCase : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase : str = [*signature.parameters.keys()] UpperCamelCase : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1], __a ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def snake_case_ ( self ) -> List[Any]: def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): UpperCamelCase : Any = model_class(__a ) UpperCamelCase : Optional[int] = model(**self._prepare_for_class(__a, __a ), training=__a ) UpperCamelCase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__a ), expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase : str = layer_type UpperCamelCase : Optional[Any] = True check_hidden_states_output(__a, __a, __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase : Optional[Any] = True check_hidden_states_output(__a, __a, __a ) def snake_case_ ( self ) -> Dict: UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_={} ): UpperCamelCase : Union[str, Any] = model(__a, return_dict=__a, **__a ) UpperCamelCase : Any = model(__a, return_dict=__a, **__a ).to_tuple() def recursive_check(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): if isinstance(__a, (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__a, __a ): recursive_check(__a, __a ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__a, __a ) ), msg=( 'Tuple and dict output are not equal. 
Difference:' F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ), ) recursive_check(__a, __a ) for model_class in self.all_model_classes: UpperCamelCase : str = model_class(__a ) UpperCamelCase : int = self._prepare_for_class(__a, __a ) UpperCamelCase : int = self._prepare_for_class(__a, __a ) check_equivalence(__a, __a, __a ) UpperCamelCase : Any = self._prepare_for_class(__a, __a, return_labels=__a ) UpperCamelCase : Optional[int] = self._prepare_for_class(__a, __a, return_labels=__a ) check_equivalence(__a, __a, __a ) UpperCamelCase : List[str] = self._prepare_for_class(__a, __a ) UpperCamelCase : List[Any] = self._prepare_for_class(__a, __a ) check_equivalence(__a, __a, __a, {'output_hidden_states': True} ) UpperCamelCase : Tuple = self._prepare_for_class(__a, __a, return_labels=__a ) UpperCamelCase : List[Any] = self._prepare_for_class(__a, __a, return_labels=__a ) check_equivalence(__a, __a, __a, {'output_hidden_states': True} ) def snake_case_ ( self ) -> Tuple: UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def snake_case_ ( self ) -> List[Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : int = TFRegNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def UpperCamelCase ( ) -> Optional[int]: UpperCamelCase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def snake_case_ ( self ) -> Optional[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case_ ( self ) -> Dict: UpperCamelCase : Tuple = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase : Optional[Any] = self.default_image_processor UpperCamelCase : Dict = prepare_img() UpperCamelCase : List[Any] = image_processor(images=__a, return_tensors='tf' ) # forward pass UpperCamelCase : Optional[int] = model(**__a, training=__a ) # verify the logits UpperCamelCase : Dict = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape, __a ) UpperCamelCase : int = tf.constant([-0.41_80, -1.50_51, -3.48_36] ) tf.debugging.assert_near(outputs.logits[0, :3], __a, atol=1e-4 )
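# A hedged sketch of the slow integration path exercised by the tests above
# (downloads weights; the checkpoint is the first entry of
# TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, i.e. facebook/regnet-y-040;
# "cat.png" is a placeholder image path).
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
logits = model(**inputs, training=False).logits  # shape (1, 1000)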
119
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a grayscale PIL image around its mean pixel value."""
    height, width = image.size  # note: PIL reports (width, height); safe for square images
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
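# A hedged usage sketch (assumed, not from the source) exercising mean_threshold
# on a synthetic 4x4 grayscale image, so no file on disk is needed. Pixels above
# the mean (85 here) go to 255, the rest to 0.
from PIL import Image

img = Image.new("L", (4, 4))
img.putdata(list(range(10, 170, 10)))  # 10, 20, ..., 160; mean is 85
binarized = mean_threshold(img)
print(sorted(set(binarized.getdata())))  # [0, 255]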
36
0
"""simple docstring""" from __future__ import annotations from collections.abc import MutableSequence class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any: if len(__a ) != degree + 1: raise ValueError( "The number of coefficients should be equal to the degree + 1." ) UpperCAmelCase_ : list[float] = list(__a ) UpperCAmelCase_ : Any = degree def __add__( self : Dict , lowerCAmelCase_ : Optional[int] ) -> Dict: if self.degree > polynomial_a.degree: UpperCAmelCase_ : Optional[Any] = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , __a ) else: UpperCAmelCase_ : Union[str, Any] = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , __a ) def __sub__( self : Dict , lowerCAmelCase_ : Dict ) -> List[Any]: return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : Dict ) -> Union[str, Any]: return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] ) -> List[str]: UpperCAmelCase_ : list[float] = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , __a ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : int | float = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = "" for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__a ) return polynomial def __repr__( self : Tuple ) -> Union[str, Any]: return self.__str__() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : list[float] = [0] * self.degree for i in range(self.degree ): UpperCAmelCase_ : Optional[Any] = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , __a ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Any = 0 ) -> List[Any]: UpperCAmelCase_ : list[float] = [0] * (self.degree + 2) UpperCAmelCase_ : Dict = constant for i in range(self.degree + 1 ): UpperCAmelCase_ : Tuple = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , __a ) def __eq__( self : int , lowerCAmelCase_ : Tuple ) -> Optional[Any]: if not isinstance(__a , __a ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> Tuple: return not self.__eq__(__a )
268
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class UpperCAmelCase_ ( a): lowerCamelCase__ = 'wav2vec2' def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ): '''simple docstring''' super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a) _lowerCAmelCase : str = hidden_size _lowerCAmelCase : Optional[int] = feat_extract_norm _lowerCAmelCase : Union[str, Any] = feat_extract_activation _lowerCAmelCase : Optional[Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : str = list(__a) _lowerCAmelCase : List[str] = conv_bias _lowerCAmelCase : str = num_conv_pos_embeddings _lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups _lowerCAmelCase : str = len(self.conv_dim) _lowerCAmelCase : List[str] = num_hidden_layers _lowerCAmelCase : str = intermediate_size _lowerCAmelCase : Any = hidden_act _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : Optional[Any] = hidden_dropout _lowerCAmelCase : List[str] = attention_dropout _lowerCAmelCase : Tuple = activation_dropout _lowerCAmelCase : int = feat_proj_dropout _lowerCAmelCase : List[str] = final_dropout _lowerCAmelCase : int = layerdrop _lowerCAmelCase : int = layer_norm_eps _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : str = vocab_size _lowerCAmelCase : Optional[Any] = do_stable_layer_norm _lowerCAmelCase : Any = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase : str = apply_spec_augment _lowerCAmelCase : Optional[Any] = mask_time_prob _lowerCAmelCase : Optional[int] = mask_time_length _lowerCAmelCase : List[str] = mask_time_min_masks _lowerCAmelCase : Optional[int] = mask_feature_prob _lowerCAmelCase : Optional[int] = mask_feature_length _lowerCAmelCase : List[str] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase : Union[str, Any] = num_codevectors_per_group _lowerCAmelCase : str = num_codevector_groups _lowerCAmelCase : Optional[int] = contrastive_logits_temperature _lowerCAmelCase : Optional[int] = feat_quantizer_dropout _lowerCAmelCase : Optional[int] = num_negatives _lowerCAmelCase : Union[str, Any] = codevector_dim _lowerCAmelCase : Any = proj_codevector_dim _lowerCAmelCase : Optional[int] = diversity_loss_weight # ctc loss _lowerCAmelCase : Tuple = ctc_loss_reduction _lowerCAmelCase : Tuple = ctc_zero_infinity # adapter _lowerCAmelCase : List[Any] = add_adapter _lowerCAmelCase : List[str] = adapter_kernel_size _lowerCAmelCase : str = adapter_stride _lowerCAmelCase : List[str] = num_adapter_layers _lowerCAmelCase : str = output_hidden_size or hidden_size _lowerCAmelCase : Tuple = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase : str = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase : str = list(__a) _lowerCAmelCase : Union[str, Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : Tuple = xvector_output_dim @property def snake_case__ ( self): '''simple docstring''' return functools.reduce(operator.mul, self.conv_stride, 1)
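# A hedged instantiation sketch: the defaults above mirror
# facebook/wav2vec2-base-960h. The final property (named inputs_to_logits_ratio
# upstream) multiplies the conv strides together, i.e. raw audio is downsampled
# by 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 samples per output frame.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config()
print(config.hidden_size, config.num_hidden_layers)  # 768 12
print(config.inputs_to_logits_ratio)  # 320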
36
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # the mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
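# A hedged sketch (requires downloading tokenizer files) of the special-token
# layout built above: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for
# a pair, with token_type_ids 0 over the first segment and 1 over the second.
from transformers import FNetTokenizerFast

tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
enc = tok("first segment", "second segment")
print(enc["token_type_ids"])  # 0s over [CLS] A [SEP], then 1s over B [SEP]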
90
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[int] = config.num_labels _lowerCAmelCase : Optional[int] = config.num_hidden_layers _lowerCAmelCase : Optional[int] = DeeRobertaModel(__a) _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob) _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels) @add_start_docstrings_to_model_forward(__a) def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.num_layers try: _lowerCAmelCase : List[Any] = self.roberta( __a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, ) _lowerCAmelCase : List[Any] = outputs[1] _lowerCAmelCase : Dict = self.dropout(__a) _lowerCAmelCase : Dict = self.classifier(__a) _lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCAmelCase : Tuple = e.message _lowerCAmelCase : Union[str, Any] = e.exit_layer _lowerCAmelCase : List[Any] = outputs[0] if not self.training: _lowerCAmelCase : int = entropy(__a) _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : str = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Optional[Any] = MSELoss() _lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Optional[Any] = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits _lowerCAmelCase : Optional[int] = [] for highway_exit in outputs[-1]: _lowerCAmelCase : Any = highway_exit[0] if not self.training: highway_logits_all.append(__a) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression _lowerCAmelCase : List[str] = MSELoss() _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Dict = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(__a) if train_highway: _lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: _lowerCAmelCase : Any = (loss,) + outputs if not self.training: _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, 
highway_entropy), exit_layer) if output_layer >= 0: _lowerCAmelCase : Optional[Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
36
0
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the tanh activation function: (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
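# A quick check (assumed, not from the source) that the formula matches
# NumPy's built-in tanh.
import numpy as np

v = np.array([-1.0, 0.0, 1.0])
print(tangent_hyperbolic(v))  # [-0.76159416  0.          0.76159416]
print(np.allclose(tangent_hyperbolic(v), np.tanh(v)))  # True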
340
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType _snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( a): lowerCamelCase__ = 'vision-encoder-decoder' lowerCamelCase__ = True def __init__( self, **__a): '''simple docstring''' super().__init__(**__a) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"A configuraton of type {self.model_type} cannot be instantiated because " f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}") _lowerCAmelCase : str = kwargs.pop("encoder") _lowerCAmelCase : Any = encoder_config.pop("model_type") _lowerCAmelCase : str = kwargs.pop("decoder") _lowerCAmelCase : List[str] = decoder_config.pop("model_type") _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a) _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a) _lowerCAmelCase : Optional[int] = True @classmethod def snake_case__ ( cls, __a, __a, **__a): '''simple docstring''' logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config") _lowerCAmelCase : Optional[Any] = True _lowerCAmelCase : str = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = copy.deepcopy(self.__dict__) _lowerCAmelCase : List[str] = self.encoder.to_dict() _lowerCAmelCase : List[str] = self.decoder.to_dict() _lowerCAmelCase : Any = self.__class__.model_type return output class UpperCAmelCase_ ( a): lowerCamelCase__ = version.parse('1.11') @property def snake_case__ ( self): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ]) @property def snake_case__ ( self): '''simple docstring''' return 1E-4 @property def snake_case__ ( self): '''simple docstring''' return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}}) class UpperCAmelCase_ ( a): @property def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = OrderedDict() _lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"} _lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"} _lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"} return common_inputs def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ): '''simple docstring''' import torch _lowerCAmelCase : Optional[Any] = OrderedDict() _lowerCAmelCase : List[str] = super().generate_dummy_inputs( __a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a) _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape _lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size) _lowerCAmelCase : List[str] = dummy_input.pop("input_ids") _lowerCAmelCase : List[str] = dummy_input.pop("attention_mask") _lowerCAmelCase : Optional[int] = torch.zeros(__a) return common_inputs class UpperCAmelCase_ ( a): @property def snake_case__ ( self): '''simple docstring''' pass def snake_case__ ( self, __a): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(__a) def snake_case__ ( self, __a, __a, __a = "default"): '''simple docstring''' _lowerCAmelCase : Dict = 
encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
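# A hedged usage sketch for the composite config: building it from two
# sub-configs via the classmethod above (from_encoder_decoder_configs upstream)
# also flips the decoder into cross-attention mode, as the log message notes.
from transformers import AutoConfig, VisionEncoderDecoderConfig

encoder_config = AutoConfig.for_model("vit")
decoder_config = AutoConfig.for_model("gpt2")
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True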
36
0
import os


# All paths are set with the intent you should run this script from the root of
# the repo with the command: python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)

    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")

    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
117
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class UpperCAmelCase_ ( a): def __get__( self, __a, __a=None): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") _lowerCAmelCase : List[Any] = "__cached_" + self.fget.__name__ _lowerCAmelCase : Dict = getattr(__a, __a, __a) if cached is None: _lowerCAmelCase : str = self.fget(__a) setattr(__a, __a, __a) return cached def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"invalid truth value {val!r}" ) def A ( _lowerCamelCase ): '''simple docstring''' if is_torch_fx_proxy(_lowerCamelCase ): return True if is_torch_available(): import torch if isinstance(_lowerCamelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(_lowerCamelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ): return True return isinstance(_lowerCamelCase , np.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return isinstance(_lowerCamelCase , np.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return _is_numpy(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch return isinstance(_lowerCamelCase , torch.Tensor ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch return isinstance(_lowerCamelCase , torch.device ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_device(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch if isinstance(_lowerCamelCase , _lowerCamelCase ): if hasattr(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ) else: return False return isinstance(_lowerCamelCase , torch.dtype ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import tensorflow as tf return isinstance(_lowerCamelCase , tf.Tensor ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_lowerCamelCase , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(_lowerCamelCase ) return type(_lowerCamelCase ) == tf.Tensor def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import jax.numpy as jnp # noqa: F811 return isinstance(_lowerCamelCase , jnp.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return 
False if not is_flax_available() else _is_jax(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' if isinstance(_lowerCamelCase , (dict, UserDict) ): return {k: to_py_obj(_lowerCamelCase ) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple) ): return [to_py_obj(_lowerCamelCase ) for o in obj] elif is_tf_tensor(_lowerCamelCase ): return obj.numpy().tolist() elif is_torch_tensor(_lowerCamelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(_lowerCamelCase ): return np.asarray(_lowerCamelCase ).tolist() elif isinstance(_lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def A ( _lowerCamelCase ): '''simple docstring''' if isinstance(_lowerCamelCase , (dict, UserDict) ): return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple) ): return np.array(_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): return obj.numpy() elif is_torch_tensor(_lowerCamelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(_lowerCamelCase ): return np.asarray(_lowerCamelCase ) else: return obj class UpperCAmelCase_ ( a): def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = fields(self) # Safety and consistency checks if not len(__a): raise ValueError(f"{self.__class__.__name__} has no fields.") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") _lowerCAmelCase : Dict = getattr(self, class_fields[0].name) _lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(__a): if isinstance(__a, __a): _lowerCAmelCase : Tuple = first_field.items() _lowerCAmelCase : Dict = True else: try: _lowerCAmelCase : Dict = iter(__a) _lowerCAmelCase : Any = True except TypeError: _lowerCAmelCase : Any = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__a): if ( not isinstance(__a, (list, tuple)) or not len(__a) == 2 or not isinstance(element[0], __a) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute _lowerCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"Cannot set key/value for {element}. 
It needs to be a tuple (key, value).") break setattr(self, element[0], element[1]) if element[1] is not None: _lowerCAmelCase : Any = element[1] elif first_field is not None: _lowerCAmelCase : Any = first_field else: for field in class_fields: _lowerCAmelCase : Dict = getattr(self, field.name) if v is not None: _lowerCAmelCase : Union[str, Any] = v def __delitem__( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__( self, __a): '''simple docstring''' if isinstance(__a, __a): _lowerCAmelCase : Optional[int] = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self, __a, __a): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__a, __a) super().__setattr__(__a, __a) def __setitem__( self, __a, __a): '''simple docstring''' super().__setitem__(__a, __a) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__a, __a) def snake_case__ ( self): '''simple docstring''' return tuple(self[k] for k in self.keys()) class UpperCAmelCase_ ( a , a): @classmethod def snake_case__ ( cls, __a): '''simple docstring''' raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}") class UpperCAmelCase_ ( a): lowerCamelCase__ = 'longest' lowerCamelCase__ = 'max_length' lowerCamelCase__ = 'do_not_pad' class UpperCAmelCase_ ( a): lowerCamelCase__ = 'pt' lowerCamelCase__ = 'tf' lowerCamelCase__ = 'np' lowerCamelCase__ = 'jax' class UpperCAmelCase_ : def __init__( self, __a): '''simple docstring''' _lowerCAmelCase : Tuple = context_managers _lowerCAmelCase : Dict = ExitStack() def __enter__( self): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(__a) def __exit__( self, *__a, **__a): '''simple docstring''' self.stack.__exit__(*__a, **__a) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : str = infer_framework(_lowerCamelCase ) if framework == "tf": _lowerCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _lowerCAmelCase : str = inspect.signature(model_class.forward ) # PyTorch models else: _lowerCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : str = model_class.__name__ _lowerCAmelCase : Optional[Any] = infer_framework(_lowerCamelCase ) if framework == "tf": _lowerCAmelCase : Dict = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _lowerCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: _lowerCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in 
("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def A ( _lowerCamelCase , _lowerCamelCase = "" , _lowerCamelCase = "." ): '''simple docstring''' def _flatten_dict(_lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase="." ): for k, v in d.items(): _lowerCAmelCase : Dict = str(_lowerCamelCase ) + delimiter + str(_lowerCamelCase ) if parent_key else k if v and isinstance(_lowerCamelCase , _lowerCamelCase ): yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase ).items() else: yield key, v return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ) @contextmanager def A ( _lowerCamelCase , _lowerCamelCase = False ): '''simple docstring''' if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def A ( _lowerCamelCase , _lowerCamelCase=None ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.transpose(_lowerCamelCase , axes=_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.T if axes is None else array.permute(*_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase ) else: raise ValueError(F"Type not supported for transpose: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.reshape(_lowerCamelCase , _lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.reshape(*_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.reshape(_lowerCamelCase , _lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.reshape(_lowerCamelCase , _lowerCamelCase ) else: raise ValueError(F"Type not supported for reshape: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase=None ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) else: raise ValueError(F"Type not supported for squeeze: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.expand_dims(_lowerCamelCase , _lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.unsqueeze(dim=_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase ) else: raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.size(_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.numel() elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.size(_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return array.size else: raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." 
) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' for key, value in auto_map.items(): if isinstance(_lowerCamelCase , (tuple, list) ): _lowerCAmelCase : List[Any] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: _lowerCAmelCase : Tuple = F"{repo_id}--{value}" return auto_map def A ( _lowerCamelCase ): '''simple docstring''' for base_class in inspect.getmro(_lowerCamelCase ): _lowerCAmelCase : Tuple = base_class.__module__ _lowerCAmelCase : int = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"Could not infer framework from class {model_class}." )
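# A hedged behavior sketch with plain NumPy inputs. The module-level helpers
# are all obfuscated to `A` above; upstream (transformers.utils.generic) they
# are named transpose, reshape, squeeze, expand_dims and tensor_size, and those
# assumed names are used here.
import numpy as np

x = np.zeros((2, 3, 1))
print(transpose(x).shape)         # (1, 3, 2) - axes reversed, like np.transpose
print(reshape(x, (3, 2)).shape)   # (3, 2)
print(squeeze(x, axis=-1).shape)  # (2, 3)
print(expand_dims(x, 0).shape)    # (1, 2, 3, 1)
print(tensor_size(x))             # 6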
36
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    """Configuration for an MGP-STR scene-text-recognition model."""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
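# A hedged instantiation sketch: the defaults above mirror
# alibaba-damo/mgp-str-base, which predicts three label streams
# (character, BPE and wordpiece) per image.
from transformers import MgpstrConfig

config = MgpstrConfig()
print(config.image_size, config.max_token_length)          # [32, 128] 27
print(config.num_character_labels, config.num_bpe_labels)  # 38 50257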
128
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
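# The behavior pinned down by the first test above, as a standalone sketch:
# shards are split into at most `max_num_jobs` contiguous ranges of near-equal
# size, with the larger ranges coming first.
from datasets.utils.sharding import _distribute_shards

print(_distribute_shards(num_shards=10, max_num_jobs=3))
# [range(0, 4), range(4, 7), range(7, 10)]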
36
0
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
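# A hedged note on the pattern above: _LazyModule keeps `import transformers`
# cheap by deferring each submodule import until one of its names is first
# accessed.
from transformers import GPTNeoXConfig  # imports configuration_gpt_neox lazily

config = GPTNeoXConfig()
print(config.model_type)  # "gpt_neox"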
20
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class UpperCAmelCase_ : def __init__( self, __a = "cpu", __a = "openai/clip-vit-large-patch14"): '''simple docstring''' _lowerCAmelCase : Optional[int] = device _lowerCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(__a) _lowerCAmelCase : Any = [0.48_145_466, 0.4_578_275, 0.40_821_073] _lowerCAmelCase : Union[str, Any] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _lowerCAmelCase : Tuple = torchvision.transforms.Normalize(self.image_mean, self.image_std) _lowerCAmelCase : Optional[int] = torchvision.transforms.Resize(224) _lowerCAmelCase : Dict = torchvision.transforms.CenterCrop(224) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.resize(__a) _lowerCAmelCase : List[str] = self.center_crop(__a) _lowerCAmelCase : Optional[Any] = self.normalize(__a) return images def __call__( self, __a=None, __a=None, **__a): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(text=__a, **__a) _lowerCAmelCase : List[str] = self.preprocess_img(__a) _lowerCAmelCase : Tuple = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class UpperCAmelCase_ ( nn.Module): def __init__( self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False, ): '''simple docstring''' super().__init__() _lowerCAmelCase : List[str] = None _lowerCAmelCase : List[str] = device if device else get_device() if vqgan: _lowerCAmelCase : Union[str, Any] = vqgan else: _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a) self.vqgan.eval() if clip: _lowerCAmelCase : str = clip else: _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") self.clip.to(self.device) _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device) _lowerCAmelCase : Any = iterations _lowerCAmelCase : List[Any] = lr _lowerCAmelCase : Tuple = log _lowerCAmelCase : List[str] = make_grid _lowerCAmelCase : int = return_val _lowerCAmelCase : Dict = quantize _lowerCAmelCase : Any = self.vqgan.decoder.z_shape def snake_case__ ( self, __a=None, __a=None, __a=5, __a=True): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [] if output_path is None: _lowerCAmelCase : List[Any] = "./animation.gif" if input_path is None: _lowerCAmelCase : str = self.save_path _lowerCAmelCase : str = sorted(glob(input_path + "/*")) if not len(__a): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)") if len(__a) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)") _lowerCAmelCase : Optional[int] = total_duration / len(__a) _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a) if extend_frames: _lowerCAmelCase : Any = 1.5 _lowerCAmelCase : List[str] = 3 for file_name in paths: if file_name.endswith(".png"): images.append(imageio.imread(__a)) imageio.mimsave(__a, __a, duration=__a) print(f"gif saved to {output_path}") def snake_case__ ( self, __a=None, __a=None): '''simple docstring''' if not (path or img): raise 
ValueError("Input either path or tensor") if img is not None: raise NotImplementedError _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device) _lowerCAmelCase : Dict = preprocess_vqgan(__a) _lowerCAmelCase , *_lowerCAmelCase : str = self.vqgan.encode(__a) return z def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_() _lowerCAmelCase : Dict = base_latent + transform_vector if self.quantize: _lowerCAmelCase , *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a) else: _lowerCAmelCase : Any = trans_latent return self.vqgan.decode(__a) def snake_case__ ( self, __a, __a, __a=None): '''simple docstring''' _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a) _lowerCAmelCase : Optional[int] = self.clip(**__a) _lowerCAmelCase : Any = clip_outputs.logits_per_image if weights is not None: _lowerCAmelCase : Tuple = similarity_logits * weights return similarity_logits.sum() def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"])) if neg_prompts: _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"]) else: _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device) _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a) return loss def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device) _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr) for i in range(self.iterations): optim.zero_grad() _lowerCAmelCase : Any = self._add_vector(__a) _lowerCAmelCase : Optional[Any] = loop_post_process(__a) _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a) print("CLIP loss", __a) if self.log: wandb.log({"CLIP Loss": clip_loss}) clip_loss.backward(retain_graph=__a) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def snake_case__ ( self, __a, __a, __a): '''simple docstring''' wandb.init(reinit=__a, project="face-editor") wandb.config.update({"Positive Prompts": positive_prompts}) wandb.config.update({"Negative Prompts": negative_prompts}) wandb.config.update({"lr": self.lr, "iterations": self.iterations}) if image_path: _lowerCAmelCase : str = Image.open(__a) _lowerCAmelCase : int = image.resize((256, 256)) wandb.log("Original Image", wandb.Image(__a)) def snake_case__ ( self, __a): '''simple docstring''' if not prompts: return [] _lowerCAmelCase : int = [] _lowerCAmelCase : List[str] = [] if isinstance(__a, __a): _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")] for prompt in prompts: if isinstance(__a, (tuple, list)): _lowerCAmelCase : Optional[Any] = prompt[0] _lowerCAmelCase : Union[str, Any] = float(prompt[1]) elif ":" in prompt: _lowerCAmelCase , _lowerCAmelCase : int = prompt.split(":") _lowerCAmelCase : Optional[Any] = float(__a) else: _lowerCAmelCase : Optional[int] = prompt _lowerCAmelCase : List[Any] = 1.0 processed_prompts.append(__a) weights.append(__a) return { "prompts": processed_prompts, "weights": torch.tensor(__a, device=self.device), } def snake_case__ ( self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None, ): '''simple docstring''' if image_path: _lowerCAmelCase : 
List[Any] = self._get_latent(__a) else: _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device) if self.log: self._init_logging(__a, __a, __a) assert pos_prompts, "You must provide at least one positive prompt." _lowerCAmelCase : int = self.process_prompts(__a) _lowerCAmelCase : List[str] = self.process_prompts(__a) if save_final and save_path is None: _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"])) if not os.path.exists(__a): os.makedirs(__a) else: _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp() os.makedirs(__a) _lowerCAmelCase : Tuple = save_path _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0] if show_intermediate: print("Original Image") show_pil(custom_to_pil(__a)) _lowerCAmelCase : int = loop_post_process(__a) for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)): if show_intermediate: show_pil(__a) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png")) if self.log: wandb.log({"Image": wandb.Image(__a)}) if show_final: show_pil(__a) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
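# A standalone sketch (helper name assumed) of the prompt syntax that
# process_prompts above accepts: prompts are "|"-separated, each with an
# optional ":weight" suffix defaulting to 1.0.
def parse_prompts(prompts: str) -> tuple[list[str], list[float]]:
    processed, weights = [], []
    for prompt in (p.strip() for p in prompts.split("|")):
        if ":" in prompt:
            text, weight = prompt.split(":")
            processed.append(text)
            weights.append(float(weight))
        else:
            processed.append(prompt)
            weights.append(1.0)
    return processed, weights


print(parse_prompts("a smiling face:2| short hair"))
# (['a smiling face', 'short hair'], [2.0, 1.0])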
36
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule a__ : Union[str, Any] = {'''tokenization_bertweet''': ['''BertweetTokenizer''']} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys a__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
54
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
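# A minimal, self-contained sketch of the save/load round trip the Hub tests above
# rely on, done against a local directory instead of a remote repo so no token is
# needed; the temporary directory is illustrative.
import tempfile

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
with tempfile.TemporaryDirectory() as tmp_dir:
    processor.save_pretrained(tmp_dir)
    reloaded = ViTImageProcessor.from_pretrained(tmp_dir)
assert processor.to_dict() == reloaded.to_dict()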
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def __lowerCamelCase ( ) -> Dict: """simple docstring""" lowerCAmelCase_ : List[str] = torch.nn.Linear(2 , 4 ) lowerCAmelCase_ : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) lowerCAmelCase_ : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_lowerCamelCase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) lowerCAmelCase_ : List[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) lowerCAmelCase_ : Optional[int] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def __lowerCamelCase ( __UpperCamelCase ) -> Any: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def __lowerCamelCase ( __UpperCamelCase ) -> Tuple: """simple docstring""" lowerCAmelCase_ : Tuple = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_lowerCamelCase ) class __lowerCamelCase ( A__ ): '''simple docstring''' @require_cuda def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : str = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(__a ): lowerCAmelCase_ : List[str] = Accelerator(cpu=__a ) def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : int = Accelerator() lowerCAmelCase_ : Any = GradientState() assert state.num_steps == 1 lowerCAmelCase_ : Any = 4 assert state.num_steps == 4 assert state.sync_gradients is True lowerCAmelCase_ : Any = False assert state.sync_gradients is False GradientState._reset_state() def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Optional[Any] = Accelerator() lowerCAmelCase_ : Any = create_components() ( lowerCAmelCase_ ) : Union[str, Any] = accelerator.prepare(__a , __a , __a , __a , __a ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : Tuple = Accelerator() lowerCAmelCase_ : List[str] = create_components() accelerator.prepare(__a , __a , __a , __a , __a ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def lowerCamelCase ( self : Any ): PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ : Optional[Any] , **a_ : Tuple ): pass with patch("torch.cuda.set_device" , __a ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ): lowerCAmelCase_ : int = Accelerator() self.assertEqual(str(accelerator.state.device ) , "cuda:64" ) def lowerCamelCase ( self : List[str] ): lowerCAmelCase_ : int = Accelerator() 
lowerCAmelCase_ : List[Any] = create_components() accelerator.prepare(__a , __a , __a , __a , __a ) lowerCAmelCase_ : Optional[Any] = get_signature(__a ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__a ) # make sure random weights don't match load_random_weights(__a ) self.assertTrue(abs(model_signature - get_signature(__a ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(__a ) self.assertTrue(abs(model_signature - get_signature(__a ) ) < 1e-3 ) def lowerCamelCase ( self : Union[str, Any] ): lowerCAmelCase_ : Union[str, Any] = Accelerator() lowerCAmelCase_ : List[Any] = create_components() accelerator.prepare(__a , __a , __a , __a , __a ) lowerCAmelCase_ : Optional[Any] = get_signature(__a ) # saving hook def save_config(a_ : List[str] , a_ : Optional[int] , a_ : Dict ): lowerCAmelCase_ : Optional[int] = {"class_name": models[0].__class__.__name__} with open(os.path.join(__a , "data.json" ) , "w" ) as f: json.dump(__a , __a ) # loading hook def load_config(a_ : List[str] , a_ : Tuple ): with open(os.path.join(__a , "data.json" ) , "r" ) as f: lowerCAmelCase_ : Optional[int] = json.load(__a ) lowerCAmelCase_ : List[Any] = config["class_name"] lowerCAmelCase_ : Optional[int] = accelerator.register_save_state_pre_hook(__a ) lowerCAmelCase_ : List[str] = accelerator.register_load_state_pre_hook(__a ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__a ) # make sure random weights don't match with hooks load_random_weights(__a ) self.assertTrue(abs(model_signature - get_signature(__a ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCAmelCase_ : Any = "random" # make sure loaded weights match with hooks accelerator.load_state(__a ) self.assertTrue(abs(model_signature - get_signature(__a ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__a ) # make sure random weights don't match with hooks removed load_random_weights(__a ) self.assertTrue(abs(model_signature - get_signature(__a ) ) > 1e-3 ) # random class name to verify correct one is loaded lowerCAmelCase_ : int = "random" # make sure loaded weights match with hooks removed accelerator.load_state(__a ) self.assertTrue(abs(model_signature - get_signature(__a ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def lowerCamelCase ( self : Tuple ): lowerCAmelCase_ : Any = Accelerator() lowerCAmelCase_ : List[str] = create_components() lowerCAmelCase_ : Any = None # This should work lowerCAmelCase_ : Tuple = accelerator.prepare( __a , __a , __a , __a , __a , __a ) self.assertTrue(dummy_obj is None ) def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : Optional[Any] = Accelerator() lowerCAmelCase_ : Any = create_components() lowerCAmelCase_ : Any = [1, 2, 3] # This should work lowerCAmelCase_ : Dict = accelerator.prepare( __a , __a , __a , __a , __a , __a ) self.assertEqual( getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Dummy object should have `_is_accelerate_prepared` set to `True`" , ) self.assertEqual( getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Model is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , ) 
self.assertEqual( getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) self.assertEqual( getattr(__a , "_is_accelerate_prepared" , __a ) , __a , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , ) @slow @require_bnb def lowerCamelCase ( self : Optional[int] ): from transformers import AutoModelForCausalLM lowerCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=__a , device_map={"": 0} , ) lowerCAmelCase_ : Dict = Accelerator() # This should work lowerCAmelCase_ : List[str] = accelerator.prepare(__a ) @slow @require_bnb def lowerCamelCase ( self : Tuple ): from transformers import AutoModelForCausalLM lowerCAmelCase_ : List[Any] = Accelerator() with init_empty_weights(): lowerCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCAmelCase_ : Optional[int] = infer_auto_device_map(__a ) lowerCAmelCase_ : Dict = "cpu" lowerCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , device_map=__a , load_in_abit=__a , llm_inta_enable_fpaa_cpu_offload=__a ) # This should not work and get value error with self.assertRaises(__a ): lowerCAmelCase_ : Optional[Any] = accelerator.prepare(__a ) @slow @require_bnb @require_multi_gpu def lowerCamelCase ( self : Optional[int] ): from transformers import AutoModelForCausalLM lowerCAmelCase_ : Union[str, Any] = {"distributed_type": DistributedType.MULTI_GPU} with init_empty_weights(): lowerCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) model.tie_weights() lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(__a ) lowerCAmelCase_ : Optional[int] = 1 lowerCAmelCase_ : int = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=__a , device_map=__a , ) lowerCAmelCase_ : str = Accelerator() # This should not work and get value error with self.assertRaises(__a ): lowerCAmelCase_ : List[Any] = accelerator.prepare(__a ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def lowerCamelCase ( self : List[str] ): from transformers import AutoModelForCausalLM with init_empty_weights(): lowerCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , ) lowerCAmelCase_ : List[Any] = infer_auto_device_map(__a ) lowerCAmelCase_ : List[str] = 1 lowerCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m" , load_in_abit=__a , device_map=__a , ) lowerCAmelCase_ : Dict = Accelerator() # This should work lowerCAmelCase_ : Union[str, Any] = accelerator.prepare(__a ) @require_cuda def lowerCamelCase ( self : Optional[int] ): lowerCAmelCase_ : str = torch.nn.Linear(10 , 10 ) lowerCAmelCase_ : Union[str, Any] = torch.optim.SGD(model.parameters() , lr=0.01 ) lowerCAmelCase_ : Dict = Accelerator(cpu=__a ) lowerCAmelCase_ : Optional[int] = accelerator.prepare(__a )
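# A short usage sketch of the API exercised above: prepare components with an
# Accelerator and round-trip its state through save_state/load_state. Runs on CPU;
# the temporary directory is illustrative.
import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
model, optimizer = accelerator.prepare(model, optimizer)

with tempfile.TemporaryDirectory() as tmp_dir:
    accelerator.save_state(tmp_dir)  # writes model, optimizer and RNG state
    accelerator.load_state(tmp_dir)  # restores them in place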
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
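# A minimal inference sketch mirroring the integration test above; it assumes the
# "SCUT-DLVCLab/lilt-roberta-en-base" checkpoint is reachable and runs on CPU.
import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one bounding box per token
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])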
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
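# Example invocation (the run id and token below are placeholders, not real values):
#
#   python extract_warnings.py \
#       --workflow_run_id 123456789 \
#       --output_dir ./warnings_out \
#       --token <GITHUB_TOKEN> \
#       --targets DeprecationWarning,UserWarning
#
# The script writes artifacts.json and selected_warnings.json into --output_dir.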
import argparse
import copy


def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
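# The input file is expected to hold one weighted edge per line, "node node distance",
# and its first character names the start node. A hypothetical tsp_data.txt:
#
#   a b 20
#   a c 18
#   b c 10
#
# Example run (file name and parameters are illustrative):
#
#   python tabu_search.py -f tsp_data.txt -i 100 -s 5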
"""simple docstring""" # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" A__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] A__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } A__ = F'''{src_lang}-{tgt_lang}''' A__ = F'''\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n''' os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) A__ = os.path.join(_lowerCamelCase , 'README.md' ) print(F'''Generating {path}''' ) with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f: f.write(_lowerCamelCase ) # make sure we are under the root of the project __lowerCamelCase = Path(__file__).resolve().parent.parent.parent __lowerCamelCase = repo_dir / "model_cards" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = model_name.split("-") __lowerCamelCase = model_cards_dir / "facebook" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
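# A small usage sketch of the processor defined above; the all-zeros image is a
# stand-in for any RGB input.
import numpy as np

processor = DPTImageProcessor(size={"height": 384, "width": 384})
dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
encoded = processor.preprocess(images=dummy_image, return_tensors="np")
print(encoded["pixel_values"].shape)  # (1, 3, 384, 384)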
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowerCamelCase_ = importlib.util.find_spec('''s3fs''') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowerCamelCase_ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def snake_case ( A__ ): if "://" in dataset_path: UpperCAmelCase_ : Any = dataset_path.split("://" )[1] return dataset_path def snake_case ( A__ ): if fs is not None and fs.protocol != "file": return True else: return False def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Tuple = not is_remote_filesystem(_lowerCamelCase ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(_lowerCamelCase ) ,fs._strip_protocol(_lowerCamelCase ) ) else: fs.mv(_lowerCamelCase ,_lowerCamelCase ,recursive=_lowerCamelCase ) def snake_case ( ): if hasattr(fsspec.asyn ,"reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: UpperCAmelCase_ : Any = None UpperCAmelCase_ : int = None UpperCAmelCase_ : List[Any] = threading.Lock()
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1_000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
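# Example invocation (the dump folder is a placeholder):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub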
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
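# Worked check of the identity the prints above rely on: for prime p, Fermat's
# little theorem makes a**(p-2) % p the modular inverse of a mod p.
assert binary_exponentiation(3, 5, 7) == pow(3, 5, 7) == 5
inverse_of_10 = binary_exponentiation(10, 701 - 2, 701)
assert (10 * inverse_of_10) % 701 == 1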
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
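# A short usage sketch: hidden_size is derived as embed_dim * 2 ** (num_layers - 1),
# i.e. 96 * 8 = 768 for the default depths [2, 2, 6, 2].
config = SwinConfig()
print(config.hidden_size)  # 768
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']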
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f"""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
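# Example invocation via python-fire (the script name and paths are hypothetical):
# keep the first 100 lines of every file in ./data/full, writing to ./data/small.
#
#   python minify.py ./data/full ./data/small 100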
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
36
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase__ = StableDiffusionInpaintPipeline lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowerCAmelCase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowerCAmelCase__ = frozenset([] ) def _lowerCAmelCase (self :Optional[int] )-> Optional[int]: torch.manual_seed(0 ) __A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) __A = PNDMScheduler(skip_prk_steps=__a ) torch.manual_seed(0 ) __A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) __A = CLIPTextModel(__a ) __A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _lowerCAmelCase (self :List[str] , _UpperCamelCase :List[str] , _UpperCamelCase :Dict=0 )-> Tuple: __A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) __A = image.cpu().permute(0 , 2 , 3 , 1 )[0] __A = Image.fromarray(np.uinta(__a ) ).convert('''RGB''' ).resize((64, 64) ) __A = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) ) if str(__a ).startswith('''mps''' ): __A = torch.manual_seed(__a ) else: __A = torch.Generator(device=__a ).manual_seed(__a ) __A = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowerCAmelCase (self :str )-> Union[str, Any]: __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = StableDiffusionInpaintPipeline(**__a ) __A = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) __A = self.get_dummy_inputs(__a ) __A = sd_pipe(**__a ).images __A = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A = 
np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase (self :Optional[int] )-> List[str]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A_ ( unittest.TestCase ): def _lowerCAmelCase (self :Optional[Any] )-> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase (self :Tuple )-> Optional[int]: __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) __A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) __A = "stabilityai/stable-diffusion-2-inpainting" __A = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __A = "Face of a yellow cat, high resolution, sitting on a park bench" __A = torch.manual_seed(0 ) __A = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='''np''' , ) __A = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9e-3 def _lowerCAmelCase (self :Union[str, Any] )-> Tuple: __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) __A = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) __A = "stabilityai/stable-diffusion-2-inpainting" __A = StableDiffusionInpaintPipeline.from_pretrained( __a , torch_dtype=torch.floataa , safety_checker=__a , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() __A = "Face of a yellow cat, high resolution, sitting on a park bench" __A = torch.manual_seed(0 ) __A = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='''np''' , ) __A = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase (self :int )-> Dict: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) __A = "stabilityai/stable-diffusion-2-inpainting" __A = PNDMScheduler.from_pretrained(__a , subfolder='''scheduler''' ) __A = StableDiffusionInpaintPipeline.from_pretrained( __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __A = "Face of a yellow cat, high resolution, sitting on a park bench" __A = torch.manual_seed(0 ) __A = pipe( prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='''np''' , ) __A = 
torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
117
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
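A short usage sketch for the version guard above; the requirement strings are illustrative examples, not versions pinned by this module:

# passes silently when satisfied; raises ImportError / PackageNotFoundError otherwise
require_version("numpy>=1.17")
require_version("python>=3.8,<4.0")  # "python" is special-cased against sys.version_info
require_version_core("tokenizers>=0.11.1")  # adds the reinstall hint on failure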
36
0
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase : Optional[Any] =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _lowercase (a_ , unittest.TestCase ): '''simple docstring''' lowercase__ = XLMRobertaTokenizer lowercase__ = XLMRobertaTokenizerFast lowercase__ = True lowercase__ = True def _lowerCamelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase_ = XLMRobertaTokenizer(__a , keep_accents=__a ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = "<pad>" UpperCamelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(__a ) , 1002 ) def _lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = XLMRobertaTokenizer(__a , keep_accents=__a ) UpperCamelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) UpperCamelCase_ = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual( __a , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCamelCase_ = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def _lowerCamelCase ( self ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCamelCase_ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCamelCase_ = self.rust_tokenizer_class.from_pretrained(__a , **__a ) UpperCamelCase_ = self.tokenizer_class.from_pretrained(__a , **__a ) UpperCamelCase_ = tempfile.mkdtemp() UpperCamelCase_ = tokenizer_r.save_pretrained(__a ) UpperCamelCase_ = tokenizer_p.save_pretrained(__a ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) UpperCamelCase_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way UpperCamelCase_ = tokenizer_r.from_pretrained(__a ) UpperCamelCase_ = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__a ) # Save tokenizer rust, legacy_format=True UpperCamelCase_ = tempfile.mkdtemp() UpperCamelCase_ = tokenizer_r.save_pretrained(__a , legacy_format=__a ) UpperCamelCase_ = tokenizer_p.save_pretrained(__a ) # Checks it save with the same files self.assertSequenceEqual(__a , __a ) # Checks everything loads correctly in the same way UpperCamelCase_ = tokenizer_r.from_pretrained(__a ) UpperCamelCase_ = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) # Save tokenizer rust, legacy_format=False UpperCamelCase_ = tempfile.mkdtemp() UpperCamelCase_ = tokenizer_r.save_pretrained(__a , legacy_format=__a ) UpperCamelCase_ = tokenizer_p.save_pretrained(__a ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCamelCase_ = tokenizer_r.from_pretrained(__a ) UpperCamelCase_ = tokenizer_p.from_pretrained(__a ) # Check special tokens are set accordingly on Rust 
and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__a , __a ) ) shutil.rmtree(__a ) @cached_property def _lowerCamelCase ( self ): '''simple docstring''' return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" ) def _lowerCamelCase ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__a , f.name ) UpperCamelCase_ = XLMRobertaTokenizer(f.name , keep_accents=__a ) UpperCamelCase_ = pickle.dumps(__a ) pickle.loads(__a ) def _lowerCamelCase ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return UpperCamelCase_ = self.get_tokenizer() UpperCamelCase_ = self.get_rust_tokenizer() UpperCamelCase_ = "I was born in 92000, and this is falsé." UpperCamelCase_ = tokenizer.tokenize(__a ) UpperCamelCase_ = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) UpperCamelCase_ = tokenizer.encode(__a , add_special_tokens=__a ) UpperCamelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) UpperCamelCase_ = self.get_rust_tokenizer() UpperCamelCase_ = tokenizer.encode(__a ) UpperCamelCase_ = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) @slow def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = "Hello World!" UpperCamelCase_ = [0, 3_5378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__a , self.big_tokenizer.encode(__a ) ) @slow def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) UpperCamelCase_ = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 17_9459, 12_4850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 1_0114, 711, 152, 20, 6, 5, 2_2376, 642, 1221, 1_5190, 3_4153, 450, 5608, 959, 1119, 5_7702, 136, 186, 47, 1098, 2_9367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 5_0901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__a , self.big_tokenizer.encode(__a ) ) @slow def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
128
import argparse from collections import defaultdict import yaml _snake_case = "docs/source/en/_toctree.yml" def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Dict = defaultdict(_lowerCamelCase ) _lowerCAmelCase : Any = [] _lowerCAmelCase : List[str] = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"local": doc["local"], "title": doc["title"]} ) else: new_doc_list.append(_lowerCamelCase ) _lowerCAmelCase : Optional[Any] = new_doc_list _lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] _lowerCAmelCase : str = [] for duplicate_key in duplicates: _lowerCAmelCase : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} ) if len(_lowerCamelCase ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] ) _lowerCAmelCase : Optional[Any] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(_lowerCamelCase ) > 1: raise ValueError("{doc_list} has two 'overview' docs which is not allowed." ) overview_doc.extend(_lowerCamelCase ) # Sort return overview_doc def A ( _lowerCamelCase=False ): '''simple docstring''' with open(_lowerCamelCase , encoding="utf-8" ) as f: _lowerCAmelCase : int = yaml.safe_load(f.read() ) # Get to the API doc _lowerCAmelCase : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCAmelCase : List[str] = content[api_idx]["sections"] # Then to the model doc _lowerCAmelCase : Union[str, Any] = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _lowerCAmelCase : Optional[Any] = api_doc[scheduler_idx]["sections"] _lowerCAmelCase : Optional[Any] = clean_doc_toc(_lowerCamelCase ) _lowerCAmelCase : int = False if new_scheduler_doc != scheduler_doc: _lowerCAmelCase : List[Any] = True if overwrite: _lowerCAmelCase : Dict = new_scheduler_doc if diff: if overwrite: _lowerCAmelCase : Tuple = api_doc with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." 
) def A ( _lowerCamelCase=False ): '''simple docstring''' with open(_lowerCamelCase , encoding="utf-8" ) as f: _lowerCAmelCase : Tuple = yaml.safe_load(f.read() ) # Get to the API doc _lowerCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCAmelCase : int = content[api_idx]["sections"] # Then to the model doc _lowerCAmelCase : List[str] = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _lowerCAmelCase : Dict = False _lowerCAmelCase : Optional[int] = api_doc[pipeline_idx]["sections"] _lowerCAmelCase : Tuple = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _lowerCAmelCase : List[Any] = pipeline_doc["section"] _lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase ) if overwrite: _lowerCAmelCase : Optional[Any] = new_sub_pipeline_doc new_pipeline_docs.append(_lowerCamelCase ) # sort overall pipeline doc _lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase ) if new_pipeline_docs != pipeline_docs: _lowerCAmelCase : Dict = True if overwrite: _lowerCAmelCase : Optional[int] = new_pipeline_docs if diff: if overwrite: _lowerCAmelCase : Optional[int] = api_doc with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _snake_case = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
36
0
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 lowercase : Union[str, Any] = get_tests_dir("""fixtures""") class __snake_case ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = mock.Mock() lowercase : int = 500 lowercase : Tuple = {} lowercase : str = HTTPError lowercase : Union[str, Any] = {} # Download this model to make sure it's in the cache. lowercase : Tuple = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" ,return_value=__a ) as mock_head: lowercase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' with self.assertRaises(__a ): # config is in subfolder, the following should not work without specifying the subfolder lowercase : int = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) lowercase : Optional[Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" ,subfolder="""feature_extractor""" ) self.assertIsNotNone(__a ) @is_staging_test class __snake_case ( unittest.TestCase ): @classmethod def _SCREAMING_SNAKE_CASE ( cls ): '''simple docstring''' lowercase : Union[str, Any] = TOKEN HfFolder.save_token(__a ) @classmethod def _SCREAMING_SNAKE_CASE ( cls ): '''simple docstring''' try: delete_repo(token=cls._token ,repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = ViTImageProcessor.from_pretrained(__a ) image_processor.push_to_hub("""test-image-processor""" ,use_auth_token=self._token ) lowercase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__a ,getattr(__a ,__a ) ) # Reset repo delete_repo(token=self._token ,repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a ,repo_id="""test-image-processor""" ,push_to_hub=__a ,use_auth_token=self._token ) lowercase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__a ,getattr(__a ,__a ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Any = ViTImageProcessor.from_pretrained(__a ) 
image_processor.push_to_hub("""valid_org/test-image-processor""" ,use_auth_token=self._token ) lowercase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__a ,getattr(__a ,__a ) ) # Reset repo delete_repo(token=self._token ,repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a ,repo_id="""valid_org/test-image-processor-org""" ,push_to_hub=__a ,use_auth_token=self._token ) lowercase : Optional[int] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__a ,getattr(__a ,__a ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() lowercase : List[str] = CustomImageProcessor.from_pretrained(__a ) image_processor.push_to_hub("""test-dynamic-image-processor""" ,use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map ,{"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} ,) lowercase : Tuple = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor" ,trust_remote_code=__a ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ ,"""CustomImageProcessor""" )
20
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
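A quick worked example for the helper above, using rough textbook values for water (assumed here: bulk modulus about 2.15e9 Pa, density 1000 kg/m^3):

# sqrt(2.15e9 / 1000) is roughly 1466 m/s
print(speed_of_sound_in_a_fluid(density=1000, bulk_modulus=2.15e9))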
36
0
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType a__ : Optional[Any] = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = "vision-encoder-decoder" snake_case__ : str = True def __init__( self : Any , **UpperCAmelCase__ : Dict ) -> Optional[Any]: super().__init__(**__a ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F"""A configuraton of type {self.model_type} cannot be instantiated because """ F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) __SCREAMING_SNAKE_CASE = kwargs.pop("encoder" ) __SCREAMING_SNAKE_CASE = encoder_config.pop("model_type" ) __SCREAMING_SNAKE_CASE = kwargs.pop("decoder" ) __SCREAMING_SNAKE_CASE = decoder_config.pop("model_type" ) __SCREAMING_SNAKE_CASE = AutoConfig.for_model(__a , **__a ) __SCREAMING_SNAKE_CASE = AutoConfig.for_model(__a , **__a ) __SCREAMING_SNAKE_CASE = True @classmethod def UpperCAmelCase_ ( cls : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , **UpperCAmelCase__ : Tuple ) -> List[str]: logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__a ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = self.encoder.to_dict() __SCREAMING_SNAKE_CASE = self.decoder.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" snake_case__ : Any = version.parse("1.11") @property def UpperCAmelCase_ ( self : int ) -> str: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCAmelCase_ ( self : Any ) -> Any: return 1E-4 @property def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]: return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} ) class UpperCamelCase_ ( UpperCamelCase): """simple docstring""" @property def UpperCAmelCase_ ( self : int ) -> str: __SCREAMING_SNAKE_CASE = OrderedDict() __SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_decoder_sequence + sequence"} __SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_decoder_sequence + sequence"} __SCREAMING_SNAKE_CASE = {0: "batch", 1: "encoder_sequence"} return common_inputs def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict = -1 , UpperCAmelCase__ : Any = -1 , UpperCAmelCase__ : Optional[int] = False , UpperCAmelCase__ : List[Any] = None , ) -> Tuple: import torch __SCREAMING_SNAKE_CASE = OrderedDict() __SCREAMING_SNAKE_CASE = super().generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a ) __SCREAMING_SNAKE_CASE = dummy_input["input_ids"].shape __SCREAMING_SNAKE_CASE = (batch, encoder_sequence, self._config.encoder_hidden_size) __SCREAMING_SNAKE_CASE = dummy_input.pop("input_ids" ) __SCREAMING_SNAKE_CASE = dummy_input.pop("attention_mask" ) __SCREAMING_SNAKE_CASE = torch.zeros(__a ) return common_inputs class UpperCamelCase_ ( UpperCamelCase): 
"""simple docstring""" @property def UpperCAmelCase_ ( self : Dict ) -> List[Any]: pass def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Tuple ) -> str: return VisionEncoderDecoderEncoderOnnxConfig(__a ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] = "default" ) -> int: __SCREAMING_SNAKE_CASE = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__a , __a )
54
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging _snake_case = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class UpperCAmelCase_ ( a): def __init__( self, __a = 101): '''simple docstring''' _lowerCAmelCase : str = length def __len__( self): '''simple docstring''' return self.length def __getitem__( self, __a): '''simple docstring''' return i class UpperCAmelCase_ : def __call__( self, __a): '''simple docstring''' return {"input_ids": torch.tensor(__a), "labels": torch.tensor(__a)} class UpperCAmelCase_ ( nn.Module): def __init__( self): '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. _lowerCAmelCase : str = nn.Linear(120, 80) def snake_case__ ( self, __a, __a=None): '''simple docstring''' if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids class UpperCAmelCase_ ( a): @require_torch_neuroncore def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() _lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir() _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split() _lowerCAmelCase : List[Any] = ["torchrun"] + distributed_args + args execute_subprocess_async(__a, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class UpperCAmelCase_ ( a): @require_torch_multi_gpu def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() _lowerCAmelCase : Any = self.get_auto_remove_tmp_dir() _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split() _lowerCAmelCase : Any = ["torchrun"] + distributed_args + args execute_subprocess_async(__a, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py _snake_case = HfArgumentParser((TrainingArguments,)) _snake_case = parser.parse_args_into_dataclasses()[0] logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: _snake_case = DummyDataset(dataset_length) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Dict = list(range(len(_lowerCamelCase ) ) ) _lowerCAmelCase : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " F"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} _snake_case = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) _snake_case = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _snake_case = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _snake_case = 2 _snake_case = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _snake_case = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _snake_case = None
36
0
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin lowercase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : Any = BartphoTokenizer a_ : Tuple = False a_ : Dict = True def lowerCamelCase ( self : Dict ): super().setUp() lowerCAmelCase_ : str = ["▁This", "▁is", "▁a", "▁t", "est"] lowerCAmelCase_ : List[str] = dict(zip(__a , range(len(__a ) ) ) ) lowerCAmelCase_ : Optional[Any] = {"unk_token": "<unk>"} lowerCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] ) with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(f'''{token} {vocab_tokens[token]}\n''' ) lowerCAmelCase_ : Optional[Any] = BartphoTokenizer(__a , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase ( self : Optional[int] , **a_ : Any ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__a ) def lowerCamelCase ( self : Union[str, Any] , a_ : Optional[Any] ): lowerCAmelCase_ : Union[str, Any] = "This is a là test" lowerCAmelCase_ : Optional[int] = "This is a<unk><unk> test" return input_text, output_text def lowerCamelCase ( self : Dict ): lowerCAmelCase_ : Optional[int] = BartphoTokenizer(__a , self.monolingual_vocab_file , **self.special_tokens_map ) lowerCAmelCase_ : List[Any] = "This is a là test" lowerCAmelCase_ : str = "▁This ▁is ▁a ▁l à ▁t est".split() lowerCAmelCase_ : str = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) lowerCAmelCase_ : Tuple = tokens + [tokenizer.unk_token] lowerCAmelCase_ : List[str] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
241
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the leftmost insertion point for item, keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locate the rightmost insertion point for item, keeping the list sorted."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of item or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Same search, delegating to the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
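A small non-interactive sketch exercising the three search variants above on one sorted list (the values are arbitrary, chosen only for illustration):

data = [0, 5, 7, 10, 15]
assert binary_search(data, 15) == 4
assert binary_search_std_lib(data, 5) == 1
assert binary_search_by_recursion(data, 7, 0, len(data) - 1) == 2
assert binary_search(data, 6) is None  # absent items return None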
36
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( lowerCamelCase_ , unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[str] = LDMTextToImagePipeline _SCREAMING_SNAKE_CASE : str = TEXT_TO_IMAGE_PARAMS - { 'negative_prompt', 'negative_prompt_embeds', 'cross_attention_kwargs', 'prompt_embeds', } _SCREAMING_SNAKE_CASE : Dict = PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'callback', 'callback_steps', } _SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _SCREAMING_SNAKE_CASE : Tuple = False def _lowerCamelCase ( self ): """simple docstring""" torch.manual_seed(0 ) _lowercase : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _lowercase : Optional[int] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) _lowercase : int = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , ) torch.manual_seed(0 ) _lowercase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _lowercase : Any = CLIPTextModel(__a ) _lowercase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _lowercase : List[Any] = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ): """simple docstring""" if str(__a ).startswith("mps" ): _lowercase : Any = torch.manual_seed(__a ) else: _lowercase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) _lowercase : Dict = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowerCamelCase ( self ): """simple docstring""" _lowercase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator _lowercase : Dict = self.get_dummy_components() _lowercase : Optional[Any] = LDMTextToImagePipeline(**__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _lowercase : str = self.get_dummy_inputs(__a ) _lowercase : Optional[int] = pipe(**__a ).images _lowercase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _lowercase : Any = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class a__ ( unittest.TestCase ): def _lowerCamelCase ( self ): 
"""simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=torch.floataa , _UpperCamelCase=0 ): """simple docstring""" _lowercase : Optional[int] = torch.manual_seed(__a ) _lowercase : Any = np.random.RandomState(__a ).standard_normal((1, 4, 32, 32) ) _lowercase : Optional[int] = torch.from_numpy(__a ).to(device=__a , dtype=__a ) _lowercase : Tuple = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowerCamelCase ( self ): """simple docstring""" _lowercase : int = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__a ) pipe.set_progress_bar_config(disable=__a ) _lowercase : Union[str, Any] = self.get_inputs(__a ) _lowercase : List[Any] = pipe(**__a ).images _lowercase : str = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) _lowercase : List[str] = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] ) _lowercase : str = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class a__ ( unittest.TestCase ): def _lowerCamelCase ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase=torch.floataa , _UpperCamelCase=0 ): """simple docstring""" _lowercase : Optional[Any] = torch.manual_seed(__a ) _lowercase : Dict = np.random.RandomState(__a ).standard_normal((1, 4, 32, 32) ) _lowercase : str = torch.from_numpy(__a ).to(device=__a , dtype=__a ) _lowercase : Dict = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowerCamelCase ( self ): """simple docstring""" _lowercase : int = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(__a ) pipe.set_progress_bar_config(disable=__a ) _lowercase : Optional[Any] = self.get_inputs(__a ) _lowercase : List[str] = pipe(**__a ).images[0] _lowercase : Optional[int] = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) _lowercase : Dict = np.abs(expected_image - image ).max() assert max_diff < 1E-3
250
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[float, float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain of the filter's impulse response on a log-frequency axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase of the filter's impulse response on a log-frequency axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
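A minimal sketch of a filter that satisfies the FilterType protocol: a two-tap moving average (a crude low-pass), invented here purely to exercise the plotting helpers above:

class AverageFilter:
    """Hypothetical toy filter, not part of the module above."""

    def __init__(self) -> None:
        self.last_sample = 0.0

    def process(self, sample: float) -> float:
        out = 0.5 * (sample + self.last_sample)
        self.last_sample = sample
        return out


show_frequency_response(AverageFilter(), 48000)
show_phase_response(AverageFilter(), 48000)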
36
0
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if ( (cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f) or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) # or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) # or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) # or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) # or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) # or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f) or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) # ): # return True return False def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" for char in word: A__ = ord(_lowerCamelCase ) if not _is_chinese_char(_lowerCamelCase ): return 0 return 1 def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" A__ = set() for token in tokens: A__ = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase ) if chinese_word: word_set.add(_lowerCamelCase ) A__ = list(_lowerCamelCase ) return word_list def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if not chinese_word_set: return bert_tokens A__ = max([len(_lowerCamelCase ) for w in chinese_word_set] ) A__ = bert_tokens A__ = 0, len(_lowerCamelCase ) while start < end: A__ = True if is_chinese(bert_word[start] ): A__ = min(end - start , _lowerCamelCase ) for i in range(_lowerCamelCase , 1 , -1 ): A__ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): A__ = "##" + bert_word[j] A__ = start + i A__ = False break if single_word: start += 1 return bert_word def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" A__ = [] for i in range(0 , len(_lowerCamelCase ) , 100 ): A__ = ltp_tokenizer.seg(lines[i : i + 100] )[0] A__ = [get_chinese_word(_lowerCamelCase ) for r in res] ltp_res.extend(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) A__ = [] for i in range(0 , len(_lowerCamelCase ) , 100 ): A__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 ) bert_res.extend(res['input_ids'] ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) A__ = [] for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ): A__ = [] for id in input_ids: A__ = bert_tokenizer._convert_id_to_token(_lowerCamelCase ) input_tokens.append(_lowerCamelCase ) A__ = add_sub_symbol(_lowerCamelCase , _lowerCamelCase ) A__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCamelCase ): if token[:2] == "##": A__ = token[2:] # save chinese tokens' pos if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ): ref_id.append(_lowerCamelCase ) ref_ids.append(_lowerCamelCase ) assert len(_lowerCamelCase ) == len(_lowerCamelCase ) return ref_ids def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" with open(args.file_name , 'r' , encoding='utf-8' ) as f: A__ = f.readlines() A__ = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A__ = LTP(args.ltp ) # faster in GPU device A__ = BertTokenizer.from_pretrained(args.bert ) A__ = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: A__ = [json.dumps(_lowerCamelCase ) + "\n" for ref in ref_ids] f.writelines(_lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" ) parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer") parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") __lowerCamelCase = parser.parse_args() main(args)
221
def gray_code_sequence(bit_count: int) -> list:
    """Generate the Gray code sequence for bit_count bits, as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
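For intuition, a tiny check of the two-bit sequence; consecutive codes differ in exactly one bit:

assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code_sequence(2) == [0, 1, 3, 2]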
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
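The same conversion can be driven from Python instead of the CLI; the paths below are placeholders, not real files:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./lxmert/model.ckpt",        # hypothetical TF checkpoint
    config_file="./lxmert/config.json",              # hypothetical config
    pytorch_dump_path="./lxmert/pytorch_model.bin",
)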
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL image object
    """
    # note: PIL's size is (width, height); the swapped names below still
    # enumerate every pixel because the indexing is swapped to match
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
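For example (the input path is a placeholder): every pixel above the global mean becomes white (255), the rest black (0):

from PIL import Image

img = Image.open("photo.jpg").convert("L")  # hypothetical input, converted to grayscale
binary = mean_threshold(img)
binary.save("photo_binary.png")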
"""simple docstring""" import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = FunnelTokenizer __magic_name__ = FunnelTokenizerFast __magic_name__ = True __magic_name__ = True def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: super().setUp() UpperCAmelCase_ : Any = [ "<unk>", "<cls>", "<sep>", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: return FunnelTokenizer.from_pretrained(self.tmpdirname , **__a ) def _SCREAMING_SNAKE_CASE ( self : Dict , **lowerCAmelCase_ : Dict ) -> int: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> int: UpperCAmelCase_ : Union[str, Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Tuple = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: UpperCAmelCase_ : List[Any] = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Dict = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(__a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [7, 4, 5, 10, 8, 9] ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: UpperCAmelCase_ : Union[str, Any] = self.get_tokenizers(do_lower_case=__a ) for tokenizer in tokenizers: UpperCAmelCase_ : Optional[Any] = tokenizer("UNwant\u00E9d,running" ) UpperCAmelCase_ : Union[str, Any] = len(inputs["input_ids"] ) - 1 self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len ) UpperCAmelCase_ : Any = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" ) self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class UpperCAmelCase_ ( a): lowerCamelCase__ = 'wav2vec2' def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ): '''simple docstring''' super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a) _lowerCAmelCase : str = hidden_size _lowerCAmelCase : Optional[int] = feat_extract_norm _lowerCAmelCase : Union[str, Any] = feat_extract_activation _lowerCAmelCase : Optional[Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : str = list(__a) _lowerCAmelCase : List[str] = conv_bias _lowerCAmelCase : str = num_conv_pos_embeddings _lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups _lowerCAmelCase : str = len(self.conv_dim) _lowerCAmelCase : List[str] = num_hidden_layers _lowerCAmelCase : str = intermediate_size _lowerCAmelCase : Any = hidden_act _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : Optional[Any] = hidden_dropout _lowerCAmelCase : List[str] = attention_dropout _lowerCAmelCase : Tuple = activation_dropout _lowerCAmelCase : int = feat_proj_dropout _lowerCAmelCase : List[str] = final_dropout _lowerCAmelCase : int = layerdrop _lowerCAmelCase : int = layer_norm_eps _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : str = vocab_size _lowerCAmelCase : Optional[Any] = do_stable_layer_norm _lowerCAmelCase : Any = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase : str = apply_spec_augment _lowerCAmelCase : Optional[Any] = mask_time_prob _lowerCAmelCase : Optional[int] = mask_time_length _lowerCAmelCase : List[str] = mask_time_min_masks _lowerCAmelCase : Optional[int] = mask_feature_prob _lowerCAmelCase : Optional[int] = mask_feature_length _lowerCAmelCase : List[str] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase : Union[str, Any] = num_codevectors_per_group _lowerCAmelCase : str = num_codevector_groups _lowerCAmelCase : Optional[int] = contrastive_logits_temperature _lowerCAmelCase : Optional[int] = feat_quantizer_dropout _lowerCAmelCase : Optional[int] = num_negatives _lowerCAmelCase : Union[str, Any] = codevector_dim _lowerCAmelCase : Any = proj_codevector_dim _lowerCAmelCase : Optional[int] = diversity_loss_weight # ctc loss _lowerCAmelCase : Tuple = ctc_loss_reduction _lowerCAmelCase : Tuple = ctc_zero_infinity # adapter _lowerCAmelCase : List[Any] = add_adapter _lowerCAmelCase : List[str] = adapter_kernel_size _lowerCAmelCase : str = adapter_stride _lowerCAmelCase : List[str] = num_adapter_layers _lowerCAmelCase : str = output_hidden_size or hidden_size _lowerCAmelCase : Tuple = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase : str = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase : str = list(__a) _lowerCAmelCase : Union[str, Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : Tuple = xvector_output_dim @property def snake_case__ ( self): '''simple docstring''' return functools.reduce(operator.mul, self.conv_stride, 1)
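The closing property is the product of the convolutional strides, i.e. how many raw audio samples collapse into one frame of logits; with the default strides (5, 2, 2, 2, 2, 2, 2) that product is 320, or one frame per 20 ms at 16 kHz. A quick check against the released transformers class (assuming the property keeps its upstream name, inputs_to_logits_ratio):

from transformers import Wav2Vec2Config

config = Wav2Vec2Config()
assert config.inputs_to_logits_ratio == 5 * 2**6 == 320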
import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> int: """simple docstring""" with open(_lowerCamelCase ) as metadata_file: __lowerCamelCase = json.load(_lowerCamelCase ) __lowerCamelCase = LukeConfig(use_entity_aware_attention=_lowerCamelCase , **metadata['model_config'] ) # Load in the weights from the checkpoint_path __lowerCamelCase = torch.load(_lowerCamelCase , map_location='cpu' )["module"] # Load the entity vocab file __lowerCamelCase = load_original_entity_vocab(_lowerCamelCase ) # add an entry for [MASK2] __lowerCamelCase = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 __lowerCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks __lowerCamelCase = AddedToken('<ent>' , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) __lowerCamelCase = AddedToken('<ent2>' , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(_lowerCamelCase ) with open(os.path.join(_lowerCamelCase , 'tokenizer_config.json' ) , 'r' ) as f: __lowerCamelCase = json.load(_lowerCamelCase ) __lowerCamelCase = "MLukeTokenizer" with open(os.path.join(_lowerCamelCase , 'tokenizer_config.json' ) , 'w' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) with open(os.path.join(_lowerCamelCase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) __lowerCamelCase = MLukeTokenizer.from_pretrained(_lowerCamelCase ) # Initialize the embeddings of the special tokens __lowerCamelCase = tokenizer.convert_tokens_to_ids(['@'] )[0] __lowerCamelCase = tokenizer.convert_tokens_to_ids(['#'] )[0] __lowerCamelCase = state_dict["embeddings.word_embeddings.weight"] __lowerCamelCase = word_emb[ent_init_index].unsqueeze(0 ) __lowerCamelCase = word_emb[enta_init_index].unsqueeze(0 ) __lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: __lowerCamelCase = state_dict[bias_name] __lowerCamelCase = decoder_bias[ent_init_index].unsqueeze(0 ) __lowerCamelCase = decoder_bias[enta_init_index].unsqueeze(0 ) __lowerCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __lowerCamelCase = F"""encoder.layer.{layer_index}.attention.self.""" __lowerCamelCase = state_dict[prefix + matrix_name] __lowerCamelCase = state_dict[prefix + matrix_name] __lowerCamelCase = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __lowerCamelCase = state_dict["entity_embeddings.entity_embeddings.weight"] __lowerCamelCase = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 ) __lowerCamelCase = torch.cat([entity_emb, entity_mask_emb] ) 
# add [MASK2] for 'entity_predictions.bias' __lowerCamelCase = state_dict["entity_predictions.bias"] __lowerCamelCase = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 ) __lowerCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] ) __lowerCamelCase = LukeForMaskedLM(config=_lowerCamelCase ).eval() state_dict.pop('entity_predictions.decoder.weight' ) state_dict.pop('lm_head.decoder.weight' ) state_dict.pop('lm_head.decoder.bias' ) __lowerCamelCase = OrderedDict() for key, value in state_dict.items(): if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )): __lowerCamelCase = state_dict[key] else: __lowerCamelCase = state_dict[key] __lowerCamelCase = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) if set(_lowerCamelCase ) != {"luke.embeddings.position_ids"}: raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" ) if set(_lowerCamelCase ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs __lowerCamelCase = MLukeTokenizer.from_pretrained(_lowerCamelCase , task='entity_classification' ) __lowerCamelCase = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)." __lowerCamelCase = (0, 9) __lowerCamelCase = tokenizer(_lowerCamelCase , entity_spans=[span] , return_tensors='pt' ) __lowerCamelCase = model(**_lowerCamelCase ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base __lowerCamelCase = torch.Size((1, 33, 768) ) __lowerCamelCase = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base __lowerCamelCase = torch.Size((1, 1, 768) ) __lowerCamelCase = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" F""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=1E-4 ): raise ValueError # Verify masked word/entity prediction __lowerCamelCase = MLukeTokenizer.from_pretrained(_lowerCamelCase ) __lowerCamelCase = "Tokyo is the capital of <mask>." 
__lowerCamelCase = (24, 30) __lowerCamelCase = tokenizer(_lowerCamelCase , entity_spans=[span] , return_tensors='pt' ) __lowerCamelCase = model(**_lowerCamelCase ) __lowerCamelCase = encoding["input_ids"][0].tolist() __lowerCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) ) __lowerCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(_lowerCamelCase ) __lowerCamelCase = outputs.entity_logits[0][0].argmax().item() __lowerCamelCase = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(_lowerCamelCase ) ) model.save_pretrained(_lowerCamelCase ) def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Tuple: """simple docstring""" __lowerCamelCase = ["[MASK]", "[PAD]", "[UNK]"] __lowerCamelCase = [json.loads(_lowerCamelCase ) for line in open(_lowerCamelCase )] __lowerCamelCase = {} for entry in data: __lowerCamelCase = entry["id"] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: __lowerCamelCase = entity_id break __lowerCamelCase = F"""{language}:{entity_name}""" __lowerCamelCase = entity_id return new_mapping if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.") parser.add_argument( "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration." ) parser.add_argument( "--entity_vocab_path", default=None, type=str, help="Path to an entity_vocab.tsv file, containing the entity vocabulary.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model." ) parser.add_argument( "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted." ) __A = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
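The entity-vocab loader above expects one JSON object per line, each carrying an "id" and a list of (name, language) pairs; a two-row sketch of that format (the file name and contents are hypothetical):

import json

rows = [
    {"id": 0, "entities": [["[MASK]", None]]},                 # special tokens keep their bare name
    {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]},   # others are stored as "en:Japan", "ja:日本"
]
with open("entity_vocab.jsonl", "w", encoding="utf-8") as f:
    for row in rows:
        f.write(json.dumps(row, ensure_ascii=False) + "\n")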
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[int] = config.num_labels _lowerCAmelCase : Optional[int] = config.num_hidden_layers _lowerCAmelCase : Optional[int] = DeeRobertaModel(__a) _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob) _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels) @add_start_docstrings_to_model_forward(__a) def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.num_layers try: _lowerCAmelCase : List[Any] = self.roberta( __a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, ) _lowerCAmelCase : List[Any] = outputs[1] _lowerCAmelCase : Dict = self.dropout(__a) _lowerCAmelCase : Dict = self.classifier(__a) _lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCAmelCase : Tuple = e.message _lowerCAmelCase : Union[str, Any] = e.exit_layer _lowerCAmelCase : List[Any] = outputs[0] if not self.training: _lowerCAmelCase : int = entropy(__a) _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : str = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Optional[Any] = MSELoss() _lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Optional[Any] = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits _lowerCAmelCase : Optional[int] = [] for highway_exit in outputs[-1]: _lowerCAmelCase : Any = highway_exit[0] if not self.training: highway_logits_all.append(__a) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression _lowerCAmelCase : List[str] = MSELoss() _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Dict = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(__a) if train_highway: _lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: _lowerCAmelCase : Any = (loss,) + outputs if not self.training: _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, 
highway_entropy), exit_layer) if output_layer >= 0: _lowerCAmelCase : Optional[Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
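The early-exit logic relies on the entropy helper imported from modeling_highway_bert; a minimal stand-in with the usual definition (a sketch, not the exact upstream code):

import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution; low entropy means a
    # highway exit is confident enough to stop early
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)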
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power from apparent power and power factor.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate reactive power from apparent power and power factor.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
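Worked example: a 100 VA load at power factor 0.9 draws 90 W of real power and about 43.59 var of reactive power:

import math

assert math.isclose(real_power(100, 0.9), 90.0)
assert math.isclose(reactive_power(100, 0.9), 43.5889894354, rel_tol=1e-9)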
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig


if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
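Composing a config the way from_encoder_decoder_configs is meant to be used (real transformers classes; the ViT + BERT pairing is just one possible choice):

from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention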
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _a ( lowerCamelCase: Optional[int] ) -> Dict: '''simple docstring''' __A = SwinConfig(image_size=1_92 ) if "base" in model_name: __A = 6 __A = 1_28 __A = (2, 2, 18, 2) __A = (4, 8, 16, 32) elif "large" in model_name: __A = 12 __A = 1_92 __A = (2, 2, 18, 2) __A = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) __A = window_size __A = embed_dim __A = depths __A = num_heads return config def _a ( lowerCamelCase: Optional[Any] ) -> int: '''simple docstring''' if "encoder.mask_token" in name: __A = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: __A = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: __A = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: __A = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __A = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __A = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __A = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __A = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __A = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": __A = "layernorm.weight" if name == "encoder.norm.bias": __A = "layernorm.bias" if "decoder" in name: pass else: __A = "swin." + name return name def _a ( lowerCamelCase: Any , lowerCamelCase: List[Any] ) -> Optional[Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): __A = orig_state_dict.pop(_lowerCamelCase ) if "attn_mask" in key: pass elif "qkv" in key: __A = key.split('''.''' ) __A = int(key_split[2] ) __A = int(key_split[4] ) __A = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __A = val[:dim, :] __A = val[ dim : dim * 2, : ] __A = val[-dim:, :] else: __A = val[ :dim ] __A = val[ dim : dim * 2 ] __A = val[ -dim: ] else: __A = val return orig_state_dict def _a ( lowerCamelCase: Dict , lowerCamelCase: Dict , lowerCamelCase: Optional[int] , lowerCamelCase: int ) -> List[Any]: '''simple docstring''' __A = torch.load(_lowerCamelCase , map_location='''cpu''' )["model"] __A = get_swin_config(_lowerCamelCase ) __A = SwinForMaskedImageModeling(_lowerCamelCase ) model.eval() __A = convert_state_dict(_lowerCamelCase , _lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) __A = "http://images.cocodataset.org/val2017/000000039769.jpg" __A = ViTImageProcessor(size={'''height''': 1_92, '''width''': 1_92} ) __A = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) __A = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ) with torch.no_grad(): __A = model(**_lowerCamelCase ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) 
image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": snake_case__ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) snake_case__ : Any = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
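Driving the conversion from Python rather than argparse; the local paths are placeholders, and the function name follows the __main__ block above:

convert_swin_checkpoint(
    "swin-base-simmim-window6-192",
    "./simmim_pretrain__swin_base__img192_window6__100ep.pth",  # hypothetical local checkpoint
    "./swin-simmim-converted",
    False,  # push_to_hub
)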
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class UpperCAmelCase_ ( a): def __get__( self, __a, __a=None): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") _lowerCAmelCase : List[Any] = "__cached_" + self.fget.__name__ _lowerCAmelCase : Dict = getattr(__a, __a, __a) if cached is None: _lowerCAmelCase : str = self.fget(__a) setattr(__a, __a, __a) return cached def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"invalid truth value {val!r}" ) def A ( _lowerCamelCase ): '''simple docstring''' if is_torch_fx_proxy(_lowerCamelCase ): return True if is_torch_available(): import torch if isinstance(_lowerCamelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(_lowerCamelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ): return True return isinstance(_lowerCamelCase , np.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return isinstance(_lowerCamelCase , np.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return _is_numpy(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch return isinstance(_lowerCamelCase , torch.Tensor ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch return isinstance(_lowerCamelCase , torch.device ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_device(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch if isinstance(_lowerCamelCase , _lowerCamelCase ): if hasattr(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ) else: return False return isinstance(_lowerCamelCase , torch.dtype ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import tensorflow as tf return isinstance(_lowerCamelCase , tf.Tensor ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_lowerCamelCase , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(_lowerCamelCase ) return type(_lowerCamelCase ) == tf.Tensor def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import jax.numpy as jnp # noqa: F811 return isinstance(_lowerCamelCase , jnp.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return 
False if not is_flax_available() else _is_jax(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' if isinstance(_lowerCamelCase , (dict, UserDict) ): return {k: to_py_obj(_lowerCamelCase ) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple) ): return [to_py_obj(_lowerCamelCase ) for o in obj] elif is_tf_tensor(_lowerCamelCase ): return obj.numpy().tolist() elif is_torch_tensor(_lowerCamelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(_lowerCamelCase ): return np.asarray(_lowerCamelCase ).tolist() elif isinstance(_lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def A ( _lowerCamelCase ): '''simple docstring''' if isinstance(_lowerCamelCase , (dict, UserDict) ): return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple) ): return np.array(_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): return obj.numpy() elif is_torch_tensor(_lowerCamelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(_lowerCamelCase ): return np.asarray(_lowerCamelCase ) else: return obj class UpperCAmelCase_ ( a): def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = fields(self) # Safety and consistency checks if not len(__a): raise ValueError(f"{self.__class__.__name__} has no fields.") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") _lowerCAmelCase : Dict = getattr(self, class_fields[0].name) _lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(__a): if isinstance(__a, __a): _lowerCAmelCase : Tuple = first_field.items() _lowerCAmelCase : Dict = True else: try: _lowerCAmelCase : Dict = iter(__a) _lowerCAmelCase : Any = True except TypeError: _lowerCAmelCase : Any = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__a): if ( not isinstance(__a, (list, tuple)) or not len(__a) == 2 or not isinstance(element[0], __a) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute _lowerCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"Cannot set key/value for {element}. 
It needs to be a tuple (key, value).") break setattr(self, element[0], element[1]) if element[1] is not None: _lowerCAmelCase : Any = element[1] elif first_field is not None: _lowerCAmelCase : Any = first_field else: for field in class_fields: _lowerCAmelCase : Dict = getattr(self, field.name) if v is not None: _lowerCAmelCase : Union[str, Any] = v def __delitem__( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__( self, __a): '''simple docstring''' if isinstance(__a, __a): _lowerCAmelCase : Optional[int] = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self, __a, __a): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__a, __a) super().__setattr__(__a, __a) def __setitem__( self, __a, __a): '''simple docstring''' super().__setitem__(__a, __a) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__a, __a) def snake_case__ ( self): '''simple docstring''' return tuple(self[k] for k in self.keys()) class UpperCAmelCase_ ( a , a): @classmethod def snake_case__ ( cls, __a): '''simple docstring''' raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}") class UpperCAmelCase_ ( a): lowerCamelCase__ = 'longest' lowerCamelCase__ = 'max_length' lowerCamelCase__ = 'do_not_pad' class UpperCAmelCase_ ( a): lowerCamelCase__ = 'pt' lowerCamelCase__ = 'tf' lowerCamelCase__ = 'np' lowerCamelCase__ = 'jax' class UpperCAmelCase_ : def __init__( self, __a): '''simple docstring''' _lowerCAmelCase : Tuple = context_managers _lowerCAmelCase : Dict = ExitStack() def __enter__( self): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(__a) def __exit__( self, *__a, **__a): '''simple docstring''' self.stack.__exit__(*__a, **__a) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : str = infer_framework(_lowerCamelCase ) if framework == "tf": _lowerCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _lowerCAmelCase : str = inspect.signature(model_class.forward ) # PyTorch models else: _lowerCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : str = model_class.__name__ _lowerCAmelCase : Optional[Any] = infer_framework(_lowerCamelCase ) if framework == "tf": _lowerCAmelCase : Dict = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _lowerCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: _lowerCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in 
("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def A ( _lowerCamelCase , _lowerCamelCase = "" , _lowerCamelCase = "." ): '''simple docstring''' def _flatten_dict(_lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase="." ): for k, v in d.items(): _lowerCAmelCase : Dict = str(_lowerCamelCase ) + delimiter + str(_lowerCamelCase ) if parent_key else k if v and isinstance(_lowerCamelCase , _lowerCamelCase ): yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase ).items() else: yield key, v return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ) @contextmanager def A ( _lowerCamelCase , _lowerCamelCase = False ): '''simple docstring''' if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def A ( _lowerCamelCase , _lowerCamelCase=None ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.transpose(_lowerCamelCase , axes=_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.T if axes is None else array.permute(*_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase ) else: raise ValueError(F"Type not supported for transpose: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.reshape(_lowerCamelCase , _lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.reshape(*_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.reshape(_lowerCamelCase , _lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.reshape(_lowerCamelCase , _lowerCamelCase ) else: raise ValueError(F"Type not supported for reshape: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase=None ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) else: raise ValueError(F"Type not supported for squeeze: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.expand_dims(_lowerCamelCase , _lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.unsqueeze(dim=_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase ) else: raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.size(_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.numel() elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.size(_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return array.size else: raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." 
) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' for key, value in auto_map.items(): if isinstance(_lowerCamelCase , (tuple, list) ): _lowerCAmelCase : List[Any] = [F"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: _lowerCAmelCase : Tuple = F"{repo_id}--{value}" return auto_map def A ( _lowerCamelCase ): '''simple docstring''' for base_class in inspect.getmro(_lowerCamelCase ): _lowerCAmelCase : Tuple = base_class.__module__ _lowerCAmelCase : int = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"Could not infer framework from class {model_class}." )
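In upstream transformers.utils.generic the helpers defined above are exposed as flatten_dict and transpose; a quick check of what they compute (assuming that import path holds for your installed version):

import numpy as np
from transformers.utils.generic import flatten_dict, transpose

assert flatten_dict({"a": {"b": 1, "c": {"d": 2}}}) == {"a.b": 1, "a.c.d": 2}
assert transpose(np.zeros((2, 3))).shape == (3, 2)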
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length (in units) can be
    tiled with at least one tile of length two, three or four, counting each
    tile length separately and summing (Project Euler problem 116).
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
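A tiny check mirroring the Project Euler 116 example (7 two-unit, 3 three-unit and 2 four-unit tilings of a five-unit row):

assert solution(5) == 7 + 3 + 2 == 12
print(solution(10))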
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
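What the first helper computes, outside pytest (same private datasets API the tests import):

from datasets.utils.sharding import _distribute_shards

print(_distribute_shards(num_shards=10, max_num_jobs=3))  # [range(0, 4), range(4, 7), range(7, 10)]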
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowercase : Dict = sys.version_info >= (3, 10) def _snake_case( SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=_lowerCamelCase ) @dataclass class __snake_case : _a : Optional[int]= 42 _a : Dict= 42 _a : Tuple= 42 _a : int= 42 @dataclass class __snake_case : _a : Dict= 42 _a : Any= field(default="toto" , metadata={"help": "help message"} ) @dataclass class __snake_case : _a : Dict= False _a : List[Any]= True _a : Dict= None class __snake_case ( lowerCAmelCase ): _a : Tuple= "titi" _a : Optional[int]= "toto" class __snake_case ( lowerCAmelCase ): _a : str= "titi" _a : Union[str, Any]= "toto" _a : List[str]= 42 @dataclass class __snake_case : _a : Optional[int]= "toto" def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = BasicEnum(self.foo ) @dataclass class __snake_case : _a : List[Any]= "toto" def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Tuple = MixedTypeEnum(self.foo ) @dataclass class __snake_case : _a : Dict= None _a : int= field(default=lowerCAmelCase , metadata={"help": "help message"} ) _a : List[str]= None _a : Tuple= list_field(default=[] ) _a : Tuple= list_field(default=[] ) @dataclass class __snake_case : _a : List[Any]= list_field(default=[] ) _a : List[Any]= list_field(default=[1, 2, 3] ) _a : Dict= list_field(default=["Hallo", "Bonjour", "Hello"] ) _a : Dict= list_field(default=[0.1, 0.2, 0.3] ) @dataclass class __snake_case : _a : List[str]= field() _a : int= field() _a : int= field() def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = BasicEnum(self.required_enum ) @dataclass class __snake_case : _a : Dict= 42 _a : Tuple= field() _a : List[Any]= None _a : Dict= field(default="toto" , metadata={"help": "help message"} ) _a : Optional[Any]= list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class __snake_case : _a : Optional[int]= False _a : int= True _a : Optional[Any]= None @dataclass class __snake_case : _a : Optional[Any]= None _a : int= field(default=lowerCAmelCase , metadata={"help": "help message"} ) _a : Dict= None _a : Union[str, Any]= list_field(default=[] ) _a : Optional[int]= list_field(default=[] ) class __snake_case ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' self.assertEqual(len(a._actions ) ,len(b._actions ) ) for x, y in zip(a._actions ,b._actions ): lowercase : int = {k: v for k, v in vars(__a ).items() if k != "container"} lowercase : Dict = {k: v for k, v in vars(__a ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" ,__a ) and yy.get("""choices""" ,__a ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](__a ) ,yy["""type"""](__a ) ) del xx["type"], yy["type"] self.assertEqual(__a ,__a ) def _SCREAMING_SNAKE_CASE ( self ): 
'''simple docstring''' lowercase : List[str] = HfArgumentParser(__a ) lowercase : List[Any] = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=__a ,required=__a ) expected.add_argument("""--bar""" ,type=__a ,required=__a ) expected.add_argument("""--baz""" ,type=__a ,required=__a ) expected.add_argument("""--flag""" ,type=__a ,default=__a ,const=__a ,nargs="""?""" ) self.argparsersEqual(__a ,__a ) lowercase : int = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] (lowercase ) : str = parser.parse_args_into_dataclasses(__a ,look_for_args_file=__a ) self.assertFalse(example.flag ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = HfArgumentParser(__a ) lowercase : Any = argparse.ArgumentParser() expected.add_argument("""--foo""" ,default=42 ,type=__a ) expected.add_argument("""--baz""" ,default="""toto""" ,type=__a ,help="""help message""" ) self.argparsersEqual(__a ,__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[str] = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=__a ,default=__a ,const=__a ,nargs="""?""" ) expected.add_argument("""--baz""" ,type=__a ,default=__a ,const=__a ,nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" ,action="""store_false""" ,default=__a ,dest="""baz""" ) expected.add_argument("""--opt""" ,type=__a ,default=__a ) lowercase : List[str] = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__a ) for dataclass_type in dataclass_types: lowercase : Any = HfArgumentParser(__a ) self.argparsersEqual(__a ,__a ) lowercase : int = parser.parse_args([] ) self.assertEqual(__a ,Namespace(foo=__a ,baz=__a ,opt=__a ) ) lowercase : int = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(__a ,Namespace(foo=__a ,baz=__a ,opt=__a ) ) lowercase : Optional[int] = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(__a ,Namespace(foo=__a ,baz=__a ,opt=__a ) ) lowercase : Dict = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(__a ,Namespace(foo=__a ,baz=__a ,opt=__a ) ) lowercase : str = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(__a ,Namespace(foo=__a ,baz=__a ,opt=__a ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = HfArgumentParser(__a ) lowercase : str = argparse.ArgumentParser() expected.add_argument( """--foo""" ,default="""toto""" ,choices=["""titi""", """toto""", 42] ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,) self.argparsersEqual(__a ,__a ) lowercase : str = parser.parse_args([] ) self.assertEqual(args.foo ,"""toto""" ) lowercase : Any = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto ) lowercase : Optional[int] = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo ,"""titi""" ) lowercase : List[Any] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi ) lowercase : Union[str, Any] = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo ,42 ) lowercase : Optional[Any] = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' 
@dataclass class __snake_case : _a : Optional[Any]= "toto" lowercase : Tuple = HfArgumentParser(__a ) lowercase : Union[str, Any] = argparse.ArgumentParser() expected.add_argument( """--foo""" ,default="""toto""" ,choices=("""titi""", """toto""", 42) ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,) self.argparsersEqual(__a ,__a ) lowercase : Any = parser.parse_args([] ) self.assertEqual(args.foo ,"""toto""" ) lowercase : Optional[Any] = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo ,"""titi""" ) lowercase : Optional[int] = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo ,42 ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[str] = HfArgumentParser(__a ) lowercase : Any = argparse.ArgumentParser() expected.add_argument("""--foo_int""" ,nargs="""+""" ,default=[] ,type=__a ) expected.add_argument("""--bar_int""" ,nargs="""+""" ,default=[1, 2, 3] ,type=__a ) expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=__a ) expected.add_argument("""--foo_float""" ,nargs="""+""" ,default=[0.1, 0.2, 0.3] ,type=__a ) self.argparsersEqual(__a ,__a ) lowercase : Optional[Any] = parser.parse_args([] ) self.assertEqual( __a ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=["""Hallo""", """Bonjour""", """Hello"""] ,foo_float=[0.1, 0.2, 0.3] ) ,) lowercase : Optional[Any] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(__a ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=["""a""", """b""", """c"""] ,foo_float=[0.1, 0.7] ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Dict = argparse.ArgumentParser() expected.add_argument("""--foo""" ,default=__a ,type=__a ) expected.add_argument("""--bar""" ,default=__a ,type=__a ,help="""help message""" ) expected.add_argument("""--baz""" ,default=__a ,type=__a ) expected.add_argument("""--ces""" ,nargs="""+""" ,default=[] ,type=__a ) expected.add_argument("""--des""" ,nargs="""+""" ,default=[] ,type=__a ) lowercase : Optional[Any] = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__a ) for dataclass_type in dataclass_types: lowercase : Tuple = HfArgumentParser(__a ) self.argparsersEqual(__a ,__a ) lowercase : int = parser.parse_args([] ) self.assertEqual(__a ,Namespace(foo=__a ,bar=__a ,baz=__a ,ces=[] ,des=[] ) ) lowercase : List[Any] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(__a ,Namespace(foo=12 ,bar=3.14 ,baz="""42""" ,ces=["""a""", """b""", """c"""] ,des=[1, 2, 3] ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = HfArgumentParser(__a ) lowercase : Any = argparse.ArgumentParser() expected.add_argument("""--required_list""" ,nargs="""+""" ,type=__a ,required=__a ) expected.add_argument("""--required_str""" ,type=__a ,required=__a ) expected.add_argument( """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=__a ,) self.argparsersEqual(__a ,__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Any = HfArgumentParser(__a ) lowercase : Optional[int] = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=__a ,required=__a ) expected.add_argument( """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=__a ,) 
expected.add_argument("""--opt""" ,type=__a ,default=__a ) expected.add_argument("""--baz""" ,default="""toto""" ,type=__a ,help="""help message""" ) expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=__a ) self.argparsersEqual(__a ,__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' parser = HfArgumentParser(__a ) args_dict = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } args = parser.parse_dict(__a )[0] expected = BasicExample(**__a ) self.assertEqual(__a ,__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' parser = HfArgumentParser(__a ) args_dict = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__a ,parser.parse_dict ,__a ,allow_extra_keys=__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' parser = HfArgumentParser(__a ) args_dict_for_json = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: temp_local_path = os.path.join(__a ,"""temp_json""" ) os.mkdir(__a ) with open(temp_local_path + """.json""" ,"""w+""" ) as f: json.dump(__a ,__a ) args = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0] expected = BasicExample(**__a ) self.assertEqual(__a ,__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' parser = HfArgumentParser(__a ) args_dict_for_yaml = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: temp_local_path = os.path.join(__a ,"""temp_yaml""" ) os.mkdir(__a ) with open(temp_local_path + """.yaml""" ,"""w+""" ) as f: yaml.dump(__a ,__a ) args = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] expected = BasicExample(**__a ) self.assertEqual(__a ,__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' parser = HfArgumentParser(__a ) self.assertIsNotNone(__a )
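# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# Hedged example of the HfArgumentParser pattern exercised by the tests above:
# a dataclass is mapped onto argparse options and parsed back into an instance.
# The dataclass below is hypothetical; the original tests use fixtures such as
# BasicExample defined elsewhere in the file.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class SketchArguments:
    foo: int = field(metadata={"help": "a required int option"})  # hypothetical
    bar: float = 3.14
    baz: Optional[str] = None


parser = HfArgumentParser(SketchArguments)
(example,) = parser.parse_args_into_dataclasses(["--foo", "1", "--baz", "quux"])
print(example.foo, example.bar, example.baz)  # 1 3.14 quux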
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class UpperCAmelCase_ : def __init__( self, __a = "cpu", __a = "openai/clip-vit-large-patch14"): '''simple docstring''' _lowerCAmelCase : Optional[int] = device _lowerCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(__a) _lowerCAmelCase : Any = [0.48_145_466, 0.4_578_275, 0.40_821_073] _lowerCAmelCase : Union[str, Any] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _lowerCAmelCase : Tuple = torchvision.transforms.Normalize(self.image_mean, self.image_std) _lowerCAmelCase : Optional[int] = torchvision.transforms.Resize(224) _lowerCAmelCase : Dict = torchvision.transforms.CenterCrop(224) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.resize(__a) _lowerCAmelCase : List[str] = self.center_crop(__a) _lowerCAmelCase : Optional[Any] = self.normalize(__a) return images def __call__( self, __a=None, __a=None, **__a): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(text=__a, **__a) _lowerCAmelCase : List[str] = self.preprocess_img(__a) _lowerCAmelCase : Tuple = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class UpperCAmelCase_ ( nn.Module): def __init__( self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False, ): '''simple docstring''' super().__init__() _lowerCAmelCase : List[str] = None _lowerCAmelCase : List[str] = device if device else get_device() if vqgan: _lowerCAmelCase : Union[str, Any] = vqgan else: _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a) self.vqgan.eval() if clip: _lowerCAmelCase : str = clip else: _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") self.clip.to(self.device) _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device) _lowerCAmelCase : Any = iterations _lowerCAmelCase : List[Any] = lr _lowerCAmelCase : Tuple = log _lowerCAmelCase : List[str] = make_grid _lowerCAmelCase : int = return_val _lowerCAmelCase : Dict = quantize _lowerCAmelCase : Any = self.vqgan.decoder.z_shape def snake_case__ ( self, __a=None, __a=None, __a=5, __a=True): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [] if output_path is None: _lowerCAmelCase : List[Any] = "./animation.gif" if input_path is None: _lowerCAmelCase : str = self.save_path _lowerCAmelCase : str = sorted(glob(input_path + "/*")) if not len(__a): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)") if len(__a) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)") _lowerCAmelCase : Optional[int] = total_duration / len(__a) _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a) if extend_frames: _lowerCAmelCase : Any = 1.5 _lowerCAmelCase : List[str] = 3 for file_name in paths: if file_name.endswith(".png"): images.append(imageio.imread(__a)) imageio.mimsave(__a, __a, duration=__a) print(f"gif saved to {output_path}") def snake_case__ ( self, __a=None, __a=None): '''simple docstring''' if not (path or img): raise 
ValueError("Input either path or tensor") if img is not None: raise NotImplementedError _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device) _lowerCAmelCase : Dict = preprocess_vqgan(__a) _lowerCAmelCase , *_lowerCAmelCase : str = self.vqgan.encode(__a) return z def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_() _lowerCAmelCase : Dict = base_latent + transform_vector if self.quantize: _lowerCAmelCase , *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a) else: _lowerCAmelCase : Any = trans_latent return self.vqgan.decode(__a) def snake_case__ ( self, __a, __a, __a=None): '''simple docstring''' _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a) _lowerCAmelCase : Optional[int] = self.clip(**__a) _lowerCAmelCase : Any = clip_outputs.logits_per_image if weights is not None: _lowerCAmelCase : Tuple = similarity_logits * weights return similarity_logits.sum() def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"])) if neg_prompts: _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"]) else: _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device) _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a) return loss def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device) _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr) for i in range(self.iterations): optim.zero_grad() _lowerCAmelCase : Any = self._add_vector(__a) _lowerCAmelCase : Optional[Any] = loop_post_process(__a) _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a) print("CLIP loss", __a) if self.log: wandb.log({"CLIP Loss": clip_loss}) clip_loss.backward(retain_graph=__a) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def snake_case__ ( self, __a, __a, __a): '''simple docstring''' wandb.init(reinit=__a, project="face-editor") wandb.config.update({"Positive Prompts": positive_prompts}) wandb.config.update({"Negative Prompts": negative_prompts}) wandb.config.update({"lr": self.lr, "iterations": self.iterations}) if image_path: _lowerCAmelCase : str = Image.open(__a) _lowerCAmelCase : int = image.resize((256, 256)) wandb.log("Original Image", wandb.Image(__a)) def snake_case__ ( self, __a): '''simple docstring''' if not prompts: return [] _lowerCAmelCase : int = [] _lowerCAmelCase : List[str] = [] if isinstance(__a, __a): _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")] for prompt in prompts: if isinstance(__a, (tuple, list)): _lowerCAmelCase : Optional[Any] = prompt[0] _lowerCAmelCase : Union[str, Any] = float(prompt[1]) elif ":" in prompt: _lowerCAmelCase , _lowerCAmelCase : int = prompt.split(":") _lowerCAmelCase : Optional[Any] = float(__a) else: _lowerCAmelCase : Optional[int] = prompt _lowerCAmelCase : List[Any] = 1.0 processed_prompts.append(__a) weights.append(__a) return { "prompts": processed_prompts, "weights": torch.tensor(__a, device=self.device), } def snake_case__ ( self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None, ): '''simple docstring''' if image_path: _lowerCAmelCase : 
List[Any] = self._get_latent(__a) else: _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device) if self.log: self._init_logging(__a, __a, __a) assert pos_prompts, "You must provide at least one positive prompt." _lowerCAmelCase : int = self.process_prompts(__a) _lowerCAmelCase : List[str] = self.process_prompts(__a) if save_final and save_path is None: _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"])) if not os.path.exists(__a): os.makedirs(__a) else: _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp() os.makedirs(__a) _lowerCAmelCase : Tuple = save_path _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0] if show_intermediate: print("Original Image") show_pil(custom_to_pil(__a)) _lowerCAmelCase : int = loop_post_process(__a) for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)): if show_intermediate: show_pil(__a) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png")) if self.log: wandb.log({"Image": wandb.Image(__a)}) if show_final: show_pil(__a) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
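# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Hedged, dependency-free restatement of the prompt convention accepted by
# process_prompts() above: "|" separates prompts and "text:weight" attaches a
# weight. The helper name is made up for this example.
def parse_prompts_sketch(prompts):
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    parsed = []
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:
            text, weight = prompt.split(":")
            weight = float(weight)
        else:
            text, weight = prompt, 1.0
        parsed.append((text, weight))
    return parsed


print(parse_prompts_sketch("a smiling face:1.0|wrinkles:-0.5"))
# [('a smiling face', 1.0), ('wrinkles', -0.5)]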
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean a__ : Optional[int] = 0 a__ : List[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] a__ : Optional[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right a__ : Dict = tuple[int, int] class UpperCamelCase_ : """simple docstring""" def __init__( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , ) -> Any: __SCREAMING_SNAKE_CASE = pos_x __SCREAMING_SNAKE_CASE = pos_y __SCREAMING_SNAKE_CASE = (pos_y, pos_x) __SCREAMING_SNAKE_CASE = goal_x __SCREAMING_SNAKE_CASE = goal_y __SCREAMING_SNAKE_CASE = g_cost __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = self.calculate_heuristic() __SCREAMING_SNAKE_CASE = self.g_cost + self.h_cost def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE = self.pos_x - self.goal_x __SCREAMING_SNAKE_CASE = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__a ) + abs(__a ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Tuple: return self.f_cost < other.f_cost class UpperCamelCase_ : """simple docstring""" def __init__( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __a ) __SCREAMING_SNAKE_CASE = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , __a ) __SCREAMING_SNAKE_CASE = [self.start] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = False def UpperCAmelCase_ ( self : Any ) -> List[Any]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __SCREAMING_SNAKE_CASE = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(__a ) self.closed_nodes.append(__a ) __SCREAMING_SNAKE_CASE = self.get_successors(__a ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__a ) else: # retrieve the best current path __SCREAMING_SNAKE_CASE = self.open_nodes.pop(self.open_nodes.index(__a ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__a ) else: self.open_nodes.append(__a ) return [self.start.pos] def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = [] for action in delta: __SCREAMING_SNAKE_CASE = parent.pos_x + action[1] __SCREAMING_SNAKE_CASE = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __a , __a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __a , ) ) return successors def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE = node __SCREAMING_SNAKE_CASE = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __SCREAMING_SNAKE_CASE = current_node.parent path.reverse() return path class UpperCamelCase_ : """simple docstring""" def __init__( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: 
self.fwd_astar = AStar(__a , __a ) self.bwd_astar = AStar(__a , __a ) self.reached = False def search( self ): while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() current_fwd_node = self.fwd_astar.open_nodes.pop(0 ) current_bwd_node = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __a , __a ) self.fwd_astar.closed_nodes.append(__a ) self.bwd_astar.closed_nodes.append(__a ) self.fwd_astar.target = current_bwd_node self.bwd_astar.target = current_fwd_node successors = { self.fwd_astar: self.fwd_astar.get_successors(__a ), self.bwd_astar: self.bwd_astar.get_successors(__a ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__a ) else: # retrieve the best current path better_node = astar.open_nodes.pop( astar.open_nodes.index(__a ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__a ) else: astar.open_nodes.append(__a ) return [self.fwd_astar.start.pos] def retrace_bidirectional_path( self , fwd_node , bwd_node ): fwd_path = self.fwd_astar.retrace_path(__a ) bwd_path = self.bwd_astar.retrace_path(__a ) bwd_path.pop() bwd_path.reverse() path = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) start_time = time.time() a_star = AStar(init, goal) path = a_star.search() end_time = time.time() - start_time print(F"AStar execution time = {end_time:f} seconds") bd_start_time = time.time() bidir_astar = BidirectionalAStar(init, goal) bd_end_time = time.time() - bd_start_time print(F"BidirectionalAStar execution time = {bd_end_time:f} seconds")
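# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Hedged illustration of the two heuristics switched by the HEURISTIC flag
# above: Manhattan distance (HEURISTIC == 1) and Euclidean distance otherwise.
# With 4-connected moves (no diagonals), Manhattan distance is the larger of
# the two and still never overestimates the true remaining cost.
from math import sqrt


def manhattan(dx, dy):
    return abs(dx) + abs(dy)


def euclidean(dx, dy):
    return sqrt(dx * dx + dy * dy)


# From (0, 0) to (6, 6): Manhattan gives 12, Euclidean about 8.49.
print(manhattan(6, 6), euclidean(6, 6))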
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 _snake_case = get_tests_dir("fixtures") class UpperCAmelCase_ ( unittest.TestCase): def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = mock.Mock() _lowerCAmelCase : int = 500 _lowerCAmelCase : Tuple = {} _lowerCAmelCase : str = HTTPError _lowerCAmelCase : Union[str, Any] = {} # Download this model to make sure it's in the cache. _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=__a) as mock_head: _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # This check we did call the fake head request mock_head.assert_called() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json") def snake_case__ ( self): '''simple docstring''' with self.assertRaises(__a): # config is in subfolder, the following should not work without specifying the subfolder _lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants") _lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor") self.assertIsNotNone(__a) @is_staging_test class UpperCAmelCase_ ( unittest.TestCase): @classmethod def snake_case__ ( cls): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = TOKEN HfFolder.save_token(__a) @classmethod def snake_case__ ( cls): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="test-image-processor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-image-processor") except HTTPError: pass def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a) image_processor.push_to_hub("test-image-processor", use_auth_token=self._token) _lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) # Reset repo delete_repo(token=self._token, repo_id="test-image-processor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token) _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a) image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token) _lowerCAmelCase : Tuple = 
ViTImageProcessor.from_pretrained("valid_org/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-image-processor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token) _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) def snake_case__ ( self): '''simple docstring''' CustomImageProcessor.register_for_auto_class() _lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a) image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, ) _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor", trust_remote_code=__a) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
"""simple docstring""" import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCamelCase ( A__ , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = DebertaTokenizer a_ : int = True a_ : Optional[int] = DebertaTokenizerFast def lowerCamelCase ( self : str ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ : Union[str, Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "[UNK]", ] lowerCAmelCase_ : str = dict(zip(__a , range(len(__a ) ) ) ) lowerCAmelCase_ : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCAmelCase_ : List[str] = {"unk_token": "[UNK]"} lowerCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__a ) ) def lowerCamelCase ( self : Optional[int] , **a_ : List[Any] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a ) def lowerCamelCase ( self : List[str] , a_ : Optional[int] ): lowerCAmelCase_ : int = "lower newer" lowerCAmelCase_ : List[str] = "lower newer" return input_text, output_text def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : int = self.get_tokenizer() lowerCAmelCase_ : Dict = "lower newer" lowerCAmelCase_ : List[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] lowerCAmelCase_ : Dict = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) lowerCAmelCase_ : Dict = tokens + [tokenizer.unk_token] lowerCAmelCase_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) def lowerCamelCase ( self : str ): lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer() lowerCAmelCase_ : str = tokenizer("Hello" , "World" ) lowerCAmelCase_ : Optional[int] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd["token_type_ids"] , __a ) @slow def lowerCamelCase ( self : Optional[Any] ): lowerCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" ) lowerCAmelCase_ : List[Any] = tokenizer.encode("sequence builders" , add_special_tokens=__a ) lowerCAmelCase_ : str = tokenizer.encode("multi-sequence build" , add_special_tokens=__a ) lowerCAmelCase_ : Tuple = tokenizer.encode( "sequence builders" , add_special_tokens=__a , add_prefix_space=__a ) lowerCAmelCase_ : Optional[Any] = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=__a , add_prefix_space=__a ) lowerCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(__a ) lowerCAmelCase_ : Any = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def lowerCamelCase ( self : Any ): lowerCAmelCase_ : Union[str, Any] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in 
tokenizer_classes: lowerCAmelCase_ : List[Any] = tokenizer_class.from_pretrained("microsoft/deberta-base" ) lowerCAmelCase_ : int = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] lowerCAmelCase_ : Dict = tokenizer(__a , padding=__a ) lowerCAmelCase_ : Tuple = [tokenizer.decode(__a , skip_special_tokens=__a ) for seq in encoding["input_ids"]] # fmt: off lowerCAmelCase_ : int = { "input_ids": [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], "token_type_ids": [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on lowerCAmelCase_ : int = [ "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "ALBERT incorporates two parameter reduction techniques", "The first one is a factorized embedding parameterization. By decomposing the large vocabulary" " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of" " vocabulary embedding.", ] self.assertDictEqual(encoding.data , __a ) for expected, decoded in zip(__a , __a ): self.assertEqual(__a , __a )
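# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# Hedged illustration of how the toy merge rules written in setUp() above turn
# characters into subwords. This is a minimal greedy BPE merge loop, not the
# tokenizer's actual implementation.
def bpe_sketch(word, merges):
    # `merges` is an ordered list of symbol pairs, highest priority first.
    symbols = list(word)
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


print(bpe_sketch("lower", [("e", "r")]))  # ['l', 'o', 'w', 'er']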
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=24, __a=2, __a=6, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=None, __a=1000, ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : List[str] = batch_size _lowerCAmelCase : int = seq_length _lowerCAmelCase : Optional[int] = is_training _lowerCAmelCase : Dict = use_input_mask _lowerCAmelCase : List[str] = use_token_type_ids _lowerCAmelCase : str = use_labels _lowerCAmelCase : Optional[Any] = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : List[Any] = num_hidden_layers _lowerCAmelCase : Optional[Any] = num_attention_heads _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : List[str] = hidden_act _lowerCAmelCase : Union[str, Any] = hidden_dropout_prob _lowerCAmelCase : Any = attention_probs_dropout_prob _lowerCAmelCase : int = max_position_embeddings _lowerCAmelCase : Optional[int] = type_vocab_size _lowerCAmelCase : Optional[Any] = type_sequence_label_size _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : List[Any] = num_labels _lowerCAmelCase : Tuple = scope _lowerCAmelCase : str = range_bbox def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: _lowerCAmelCase : Dict = bbox[i, j, 3] _lowerCAmelCase : int = bbox[i, j, 1] _lowerCAmelCase : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCAmelCase : str = bbox[i, j, 2] _lowerCAmelCase : List[Any] = bbox[i, j, 0] _lowerCAmelCase : str = t _lowerCAmelCase : Optional[Any] = None if self.use_input_mask: _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) _lowerCAmelCase : Dict = None if self.use_token_type_ids: _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Optional[Any] = None if self.use_labels: _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size) _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels) _lowerCAmelCase : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def snake_case__ ( self): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = LiltModel(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Dict = model(__a, bbox=__a, attention_mask=__a, token_type_ids=__a) _lowerCAmelCase : str = model(__a, bbox=__a, token_type_ids=__a) _lowerCAmelCase : List[Any] = model(__a, bbox=__a) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : Optional[Any] = LiltForTokenClassification(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Dict = model( __a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ): '''simple docstring''' _lowerCAmelCase : Optional[int] = LiltForQuestionAnswering(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Tuple = model( __a, bbox=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Dict = config_and_inputs _lowerCAmelCase : List[Any] = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class UpperCAmelCase_ ( a , a , a , unittest.TestCase): lowerCamelCase__ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def snake_case__ ( self, __a, __a, __a, __a, __a): '''simple docstring''' return True def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = LiltModelTester(self) _lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37) def snake_case__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase : Any = type self.model_tester.create_and_check_model(*__a) def snake_case__ ( self): '''simple 
docstring''' config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a) def snake_case__ ( self): '''simple docstring''' config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a) @slow def snake_case__ ( self): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = LiltModel.from_pretrained(__a) self.assertIsNotNone(__a) @require_torch @slow class UpperCAmelCase_ ( unittest.TestCase): def snake_case__ ( self): '''simple docstring''' model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__a) input_ids = torch.tensor([[1, 2]], device=__a) bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=__a) # forward pass with torch.no_grad(): outputs = model(input_ids=__a, bbox=__a) expected_shape = torch.Size([1, 2, 768]) expected_slice = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]], device=__a, ) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
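# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# Hedged example of building the `bbox` input the model tester above fills with
# random values: LiLT, like the LayoutLM family, expects one (x0, y0, x1, y1)
# box per token with x0 <= x1 and y0 <= y1 on a 0-1000 normalized scale (the
# tester's range_bbox=1000 reflects this). The page size below is an assumption.
import torch


def normalize_box(box, page_width, page_height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / page_width),
        int(1000 * y0 / page_height),
        int(1000 * x1 / page_width),
        int(1000 * y1 / page_height),
    ]


# Two tokens on a hypothetical 612x792 page.
boxes = [normalize_box(b, 612, 792) for b in [(72, 72, 150, 90), (160, 72, 240, 90)]]
bbox = torch.tensor([boxes])  # shape (batch, seq_len, 4)
print(bbox.shape)  # torch.Size([1, 2, 4])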
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class a__ ( lowerCamelCase_ ): def __init__( self ): """simple docstring""" _lowercase : List[str] = [] def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_init_end" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_train_begin" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_train_end" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_epoch_begin" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_epoch_end" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_step_begin" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_step_end" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_evaluate" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_predict" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_save" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_log" ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): """simple docstring""" self.events.append("on_prediction_step" ) @require_torch class a__ ( unittest.TestCase ): def _lowerCamelCase ( self ): """simple docstring""" _lowercase : int = tempfile.mkdtemp() def _lowerCamelCase ( self ): """simple docstring""" shutil.rmtree(self.output_dir ) def _lowerCamelCase ( self , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=64 , _UpperCamelCase=64 , _UpperCamelCase=None , _UpperCamelCase=False , **_UpperCamelCase ): """simple docstring""" _lowercase : Optional[int] = RegressionDataset(length=__a ) _lowercase : Union[str, Any] = RegressionDataset(length=__a ) _lowercase : List[str] = RegressionModelConfig(a=__a , b=__a ) _lowercase : str = RegressionPreTrainedModel(__a ) _lowercase : str = TrainingArguments(self.output_dir , disable_tqdm=__a , report_to=[] , **__a ) return Trainer( __a , __a , train_dataset=__a , eval_dataset=__a , callbacks=__a , ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" self.assertEqual(len(__a ) , len(__a ) ) # Order doesn't matter _lowercase 
: Any = sorted(__a , key=lambda cb : cb.__name__ if isinstance(__a , __a ) else cb.__class__.__name__ ) _lowercase : str = sorted(__a , key=lambda cb : cb.__name__ if isinstance(__a , __a ) else cb.__class__.__name__ ) for cba, cbb in zip(__a , __a ): if isinstance(cba , type ) and isinstance(cbb , type ): self.assertEqual(cba , cbb ) elif isinstance(cba , type ) and not isinstance(cbb , type ): self.assertEqual(cba , cbb.__class__ ) elif not isinstance(cba , type ) and isinstance(cbb , type ): self.assertEqual(cba.__class__ , cbb ) else: self.assertEqual(cba , cbb ) def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" expected_events = ["on_init_end", "on_train_begin"] step = 0 train_dl_len = len(trainer.get_eval_dataloader() ) evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"] for _ in range(trainer.state.num_train_epochs ): expected_events.append("on_epoch_begin" ) for _ in range(train_dl_len ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("on_log" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("on_save" ) expected_events.append("on_epoch_end" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _lowerCamelCase ( self ): """simple docstring""" trainer = self.get_trainer() expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) # Callbacks passed at init are added to the default callbacks trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(__a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback trainer = self.get_trainer(disable_tqdm=__a ) expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) def _lowerCamelCase ( self ): """simple docstring""" expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback] trainer = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(__a ) expected_callbacks.remove(__a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) trainer = self.get_trainer() cb = trainer.pop_callback(__a ) self.assertEqual(cb.__class__ , __a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) trainer.add_callback(__a ) expected_callbacks.insert(0 , __a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) # We can also add, pop, or remove by instance trainer = self.get_trainer() cb = trainer.callback_handler.callbacks[0] trainer.remove_callback(__a ) expected_callbacks.remove(__a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) trainer = self.get_trainer() cba = trainer.callback_handler.callbacks[0] cbb = trainer.pop_callback(__a ) self.assertEqual(cba , cbb ) 
self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) trainer.add_callback(__a ) expected_callbacks.insert(0 , __a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __a ) def _lowerCamelCase ( self ): """simple docstring""" import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="ignore" , category=__a ) _lowercase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _lowercase : List[Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__a , self.get_expected_events(__a ) ) # Independent log/save/eval _lowercase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _lowercase : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(__a , self.get_expected_events(__a ) ) _lowercase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _lowercase : List[str] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__a , self.get_expected_events(__a ) ) _lowercase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" ) trainer.train() _lowercase : Tuple = trainer.callback_handler.callbacks[-2].events self.assertEqual(__a , self.get_expected_events(__a ) ) _lowercase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" ) trainer.train() _lowercase : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(__a , self.get_expected_events(__a ) ) # A bit of everything _lowercase : int = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , ) trainer.train() _lowercase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(__a , self.get_expected_events(__a ) ) # warning should be emitted for duplicated callbacks with patch("transformers.trainer_callback.logger.warning" ) as warn_mock: _lowercase : Optional[int] = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(__a ) in warn_mock.call_args[0][0]
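# --- Illustrative sketch (added for clarity; not part of the original test file) ---
# Hedged example of wiring a custom callback into Trainer, the pattern these
# tests verify. TrainerCallback hooks receive (args, state, control) plus
# keyword extras such as `logs`.
from transformers import TrainerCallback


class LossLoggerCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        # `logs` carries the metrics dict passed to Trainer.log().
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")


# Usage, assuming a configured Trainer as in get_trainer() above:
#   trainer = Trainer(model, args, train_dataset=ds, callbacks=[LossLoggerCallback])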
import argparse import copy def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : int = {} with open(_lowerCamelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _lowerCAmelCase : Tuple = [] _list.append([line.split()[1], line.split()[2]] ) _lowerCAmelCase : Any = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _lowerCAmelCase : str = [] _list.append([line.split()[0], line.split()[2]] ) _lowerCAmelCase : Any = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' with open(_lowerCamelCase ) as f: _lowerCAmelCase : str = f.read(1 ) _lowerCAmelCase : str = start_node _lowerCAmelCase : List[str] = [] _lowerCAmelCase : Any = start_node _lowerCAmelCase : str = 0 while visiting not in first_solution: _lowerCAmelCase : Dict = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_lowerCamelCase ) and k[0] not in first_solution: _lowerCAmelCase : List[str] = k[1] _lowerCAmelCase : List[Any] = k[0] first_solution.append(_lowerCamelCase ) _lowerCAmelCase : Optional[int] = distance_of_first_solution + int(_lowerCamelCase ) _lowerCAmelCase : str = best_node first_solution.append(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 _lowerCAmelCase : Tuple = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Tuple = [] for n in solution[1:-1]: _lowerCAmelCase : Dict = solution.index(_lowerCamelCase ) for kn in solution[1:-1]: _lowerCAmelCase : Dict = solution.index(_lowerCamelCase ) if n == kn: continue _lowerCAmelCase : Optional[int] = copy.deepcopy(_lowerCamelCase ) _lowerCAmelCase : int = kn _lowerCAmelCase : Dict = n _lowerCAmelCase : Optional[int] = 0 for k in _tmp[:-1]: _lowerCAmelCase : str = _tmp[_tmp.index(_lowerCamelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: _lowerCAmelCase : Optional[Any] = distance + int(i[1] ) _tmp.append(_lowerCamelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) _lowerCAmelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _lowerCamelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[str] = 1 _lowerCAmelCase : int = first_solution _lowerCAmelCase : Tuple = [] _lowerCAmelCase : Tuple = distance_of_first_solution _lowerCAmelCase : Optional[int] = solution while count <= iters: _lowerCAmelCase : int = find_neighborhood(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Dict = neighborhood[index_of_best_solution] _lowerCAmelCase : int = len(_lowerCamelCase ) - 1 _lowerCAmelCase : Union[str, Any] = False while not found: _lowerCAmelCase : Tuple = 0 while i < len(_lowerCamelCase ): if best_solution[i] != solution[i]: _lowerCAmelCase : str = best_solution[i] _lowerCAmelCase : Tuple = solution[i] break _lowerCAmelCase : int = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, 
first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) _lowerCAmelCase : Optional[int] = True _lowerCAmelCase : Optional[Any] = best_solution[:-1] _lowerCAmelCase : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: _lowerCAmelCase : Union[str, Any] = cost _lowerCAmelCase : List[Any] = solution else: _lowerCAmelCase : Optional[Any] = index_of_best_solution + 1 _lowerCAmelCase : Optional[Any] = neighborhood[index_of_best_solution] if len(_lowerCamelCase ) >= size: tabu_list.pop(0 ) _lowerCAmelCase : int = count + 1 return best_solution_ever, best_cost def A ( _lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase : int = generate_neighbours(args.File ) _lowerCAmelCase , _lowerCAmelCase : List[str] = generate_first_solution( args.File , _lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Any = tabu_search( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
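# --- Illustrative sketch (added for clarity; not part of the original script) ---
# Hedged example of the whitespace-separated input format parsed by
# generate_neighbours() above: each line reads "node_a node_b distance", and
# generate_first_solution() takes the first character of the file as the start
# node, so single-character node names are assumed here. The file contents are
# made up for illustration.
import tempfile

sample = "a b 20\na c 18\nb c 10\n"

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(sample)
    sample_path = f.name

# Equivalent CLI invocation, assuming this script is saved as tabu_search.py:
#   python tabu_search.py -f <sample_path> -i 100 -s 5
print(f"wrote sample graph to {sample_path}")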
"""simple docstring""" from math import pow def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count A__ = int(pow(_lowerCamelCase , _lowerCamelCase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n A__ = backtrack( _lowerCamelCase , _lowerCamelCase , current_number + 1 , _lowerCamelCase , _lowerCamelCase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. A__ = backtrack( _lowerCamelCase , _lowerCamelCase , current_number + 1 , _lowerCamelCase , _lowerCamelCase ) return current_sum, solutions_count def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10): raise ValueError( 'Invalid input\n' 'needed_sum must be between 1 and 1000, power between 2 and 10.' ) return backtrack(_lowerCamelCase , _lowerCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin _snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class UpperCAmelCase_ ( a , unittest.TestCase): lowerCamelCase__ = BartphoTokenizer lowerCamelCase__ = False lowerCamelCase__ = True def snake_case__ ( self): '''simple docstring''' super().setUp() _lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"] _lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a)))) _lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"} _lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"]) with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp: for token in vocab_tokens: fp.write(f"{token} {vocab_tokens[token]}\n") _lowerCAmelCase : Optional[Any] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def snake_case__ ( self, **__a): '''simple docstring''' kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname, **__a) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = "This is a là test" _lowerCAmelCase : Optional[int] = "This is a<unk><unk> test" return input_text, output_text def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map) _lowerCAmelCase : List[Any] = "This is a là test" _lowerCAmelCase : str = "▁This ▁is ▁a ▁l à ▁t est".split() _lowerCAmelCase : str = tokenizer.tokenize(__a) self.assertListEqual(__a, __a) _lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token] _lowerCAmelCase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), __a)
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''snap-research/efficientformer-l1-300''': (
        '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
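# --- Illustrative usage (added for clarity; hedged) ---
# Like any PretrainedConfig subclass, the config can be instantiated with
# defaults or with overrides; attribute names mirror the __init__ arguments.
#   config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
#   config.num_attention_heads  -> 8
#   config.hidden_act           -> "gelu"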
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _snake_case = logging.get_logger(__name__) def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' def constraint_to_multiple_of(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=None ): _lowerCAmelCase : Tuple = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple if x < min_val: _lowerCAmelCase : List[str] = math.ceil(val / multiple ) * multiple return x _lowerCAmelCase : Union[str, Any] = (output_size, output_size) if isinstance(_lowerCamelCase , _lowerCamelCase ) else output_size _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = get_image_size(_lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Any = output_size # determine new height and width _lowerCAmelCase : List[Any] = output_height / input_height _lowerCAmelCase : Any = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowerCAmelCase : Union[str, Any] = scale_width else: # fit height _lowerCAmelCase : Union[str, Any] = scale_height _lowerCAmelCase : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCamelCase ) _lowerCAmelCase : Dict = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCamelCase ) return (new_height, new_width) class UpperCAmelCase_ ( a): lowerCamelCase__ = ['pixel_values'] def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = False, __a = 1, __a = True, __a = 1 / 255, __a = True, __a = None, __a = None, **__a, ): '''simple docstring''' super().__init__(**__a) _lowerCAmelCase : Any = size if size is not None else {"height": 384, "width": 384} _lowerCAmelCase : Optional[int] = get_size_dict(__a) _lowerCAmelCase : Optional[Any] = do_resize _lowerCAmelCase : Dict = size _lowerCAmelCase : Any = keep_aspect_ratio _lowerCAmelCase : str = ensure_multiple_of _lowerCAmelCase : str = resample _lowerCAmelCase : Dict = do_rescale _lowerCAmelCase : Optional[int] = rescale_factor _lowerCAmelCase : Dict = do_normalize _lowerCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self, __a, __a, __a = False, __a = 1, __a = PILImageResampling.BICUBIC, __a = None, **__a, ): '''simple docstring''' _lowerCAmelCase : List[Any] = get_size_dict(__a) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}") _lowerCAmelCase : List[Any] = get_resize_output_image_size( __a, output_size=(size["height"], size["width"]), keep_aspect_ratio=__a, multiple=__a, ) return resize(__a, size=__a, resample=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a = None, **__a, ): '''simple docstring''' return rescale(__a, scale=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ): '''simple docstring''' return normalize(__a, mean=__a, std=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ): '''simple docstring''' _lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase : List[Any] = size if size is not None else self.size _lowerCAmelCase : str = get_size_dict(__a) _lowerCAmelCase : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowerCAmelCase : Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowerCAmelCase : int = resample if resample is not None else self.resample _lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std _lowerCAmelCase : Optional[Any] = make_list_of_images(__a) if not valid_images(__a): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. 
_lowerCAmelCase : List[Any] = [to_numpy_array(__a) for image in images] if do_resize: _lowerCAmelCase : Any = [self.resize(image=__a, size=__a, resample=__a) for image in images] if do_rescale: _lowerCAmelCase : List[str] = [self.rescale(image=__a, scale=__a) for image in images] if do_normalize: _lowerCAmelCase : Dict = [self.normalize(image=__a, mean=__a, std=__a) for image in images] _lowerCAmelCase : List[str] = [to_channel_dimension_format(__a, __a) for image in images] _lowerCAmelCase : Optional[Any] = {"pixel_values": images} return BatchFeature(data=__a, tensor_type=__a) def snake_case__ ( self, __a, __a = None): '''simple docstring''' _lowerCAmelCase : Optional[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__a) != len(__a): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits") if is_torch_tensor(__a): _lowerCAmelCase : List[Any] = target_sizes.numpy() _lowerCAmelCase : Dict = [] for idx in range(len(__a)): _lowerCAmelCase : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a) _lowerCAmelCase : int = resized_logits[0].argmax(dim=0) semantic_segmentation.append(__a) else: _lowerCAmelCase : Dict = logits.argmax(dim=1) _lowerCAmelCase : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase_ = { '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegaForCausalLM''', '''MegaForMaskedLM''', '''MegaForMultipleChoice''', '''MegaForQuestionAnswering''', '''MegaForSequenceClassification''', '''MegaForTokenClassification''', '''MegaModel''', '''MegaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[Any] = "huggingface/label-files" _lowerCAmelCase : int = "imagenet-1k-id2label.json" _lowerCAmelCase : Tuple = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowerCAmelCase : Tuple = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} _lowerCAmelCase : Tuple = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" _lowerCAmelCase : Optional[int] = BitConfig( conv_layer=_lowerCamelCase , num_labels=1_000 , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase , ) return config def A ( _lowerCamelCase ): '''simple docstring''' if "stem.conv" in name: _lowerCAmelCase : List[str] = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: _lowerCAmelCase : Any = name.replace("blocks" , "layers" ) if "head.fc" in name: _lowerCAmelCase : Optional[Any] = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): _lowerCAmelCase : Any = "bit." + name if "bit" not in name and "classifier" not in name: _lowerCAmelCase : Dict = "bit.encoder." 
+ name return name def A ( ): '''simple docstring''' _lowerCAmelCase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCAmelCase : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ): '''simple docstring''' _lowerCAmelCase : Dict = get_config(_lowerCamelCase ) # load original model from timm _lowerCAmelCase : int = create_model(_lowerCamelCase , pretrained=_lowerCamelCase ) timm_model.eval() # load state_dict of original model _lowerCAmelCase : Any = timm_model.state_dict() for key in state_dict.copy().keys(): _lowerCAmelCase : Dict = state_dict.pop(_lowerCamelCase ) _lowerCAmelCase : Tuple = val.squeeze() if "head" in key else val # load HuggingFace model _lowerCAmelCase : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.eval() model.load_state_dict(_lowerCamelCase ) # create image processor _lowerCAmelCase : Dict = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) ) _lowerCAmelCase : Optional[int] = transform.transforms _lowerCAmelCase : Tuple = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _lowerCAmelCase : Tuple = BitImageProcessor( do_resize=_lowerCamelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _lowerCAmelCase : Optional[int] = prepare_img() _lowerCAmelCase : Any = transform(_lowerCamelCase ).unsqueeze(0 ) _lowerCAmelCase : Optional[int] = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) # verify logits with torch.no_grad(): _lowerCAmelCase : Tuple = model(_lowerCamelCase ) _lowerCAmelCase : str = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) _lowerCAmelCase : Union[str, Any] = timm_model(_lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print(F"Pushing model {model_name} and processor to the hub" ) model.push_to_hub(F"ybelkada/{model_name}" ) processor.push_to_hub(F"ybelkada/{model_name}" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) _snake_case = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of `number`: -1 if it has an odd number of
    prime factors (counted with multiplicity), +1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
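# Quick sanity check (assumes a TheAlgorithms-style `maths.prime_factors`
# module is importable): lambda(8) = -1 (8 = 2*2*2, three factors) and
# lambda(10) = +1 (10 = 2*5, two factors).
#
#     assert liouville_lambda(8) == -1
#     assert liouville_lambda(10) == 1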
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
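# Usage sketch (illustrative): the attribute_map lets generic code read
# `num_hidden_layers` while the config stores `num_layers`.
#
#     config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
#     config.num_hidden_layers  # == config.num_layers == 4
#     config.hidden_size        # 96 * 2**3 == 768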
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py a_ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. a_ = direct_transformers_import(PATH_TO_TRANSFORMERS) a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING a_ = { # used to compute the property `self.chunk_length` '''EncodecConfig''': ['''overlap'''], # used as `self.bert_model = BertModel(config, ...)` '''DPRConfig''': True, # not used in modeling files, but it's an important information '''FSMTConfig''': ['''langs'''], # used internally in the configuration class file '''GPTNeoConfig''': ['''attention_types'''], # used internally in the configuration class file '''EsmConfig''': ['''is_folding_model'''], # used during training (despite we don't have training script for these models yet) '''Mask2FormerConfig''': ['''ignore_value'''], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) '''OneFormerConfig''': ['''ignore_value''', '''norm'''], # used during preprocessing and collation, see `collating_graphormer.py` '''GraphormerConfig''': ['''spatial_pos_max'''], # used internally in the configuration class file '''T5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally '''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], '''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], # used internally in the configuration class file '''LongT5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file '''SwitchTransformersConfig''': ['''feed_forward_proj'''], # having default values other than `1e-5` - we can't fix them without breaking '''BioGptConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''GLPNConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''SegformerConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''CvtConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''PerceiverConfig''': ['''layer_norm_eps'''], # used internally to calculate the feature size '''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate `mlp_dim` '''SamVisionConfig''': ['''mlp_ratio'''], # For (head) training, but so far not implemented '''ClapAudioConfig''': ['''num_classes'''], # Not used, but providing useful information to users '''SpeechT5HifiGanConfig''': ['''sampling_rate'''], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { '''CLIPSegConfig''': True, '''DeformableDetrConfig''': True, '''DetaConfig''': 
True, '''DinatConfig''': True, '''DonutSwinConfig''': True, '''EfficientFormerConfig''': True, '''FSMTConfig''': True, '''JukeboxConfig''': True, '''LayoutLMv2Config''': True, '''MaskFormerSwinConfig''': True, '''MT5Config''': True, '''NatConfig''': True, '''OneFormerConfig''': True, '''PerceiverConfig''': True, '''RagConfig''': True, '''SpeechT5Config''': True, '''SwinConfig''': True, '''Swin2SRConfig''': True, '''Swinv2Config''': True, '''SwitchTransformersConfig''': True, '''TableTransformerConfig''': True, '''TapasConfig''': True, '''TransfoXLConfig''': True, '''UniSpeechConfig''': True, '''UniSpeechSatConfig''': True, '''WavLMConfig''': True, '''WhisperConfig''': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) '''JukeboxPriorConfig''': True, # TODO: @Younes (for `is_decoder`) '''Pix2StructTextConfig''': True, } ) def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] ) -> Dict: """simple docstring""" lowerCAmelCase__ = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"config.{attribute}" in modeling_source or F"getattr(config, \"{attribute}\"" in modeling_source or F"getattr(self.config, \"{attribute}\"" in modeling_source ): lowerCAmelCase__ = True # Deal with multi-line cases elif ( re.search( RF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _lowerCamelCase , ) is not None ): lowerCAmelCase__ = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: lowerCAmelCase__ = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files lowerCAmelCase__ = [ "bos_index", "eos_index", "pad_index", "unk_index", "mask_index", "image_size", "use_cache", "out_features", "out_indices", ] lowerCAmelCase__ = ["encoder_no_repeat_ngram_size"] # Special cases to be allowed lowerCAmelCase__ = True if not attribute_used: lowerCAmelCase__ = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: lowerCAmelCase__ = True elif attribute in ["tie_word_embeddings"] and default_value is False: lowerCAmelCase__ = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: lowerCAmelCase__ = True elif attribute.endswith("_token_id" ): lowerCAmelCase__ = True # configuration class specific cases if not case_allowed: lowerCAmelCase__ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) lowerCAmelCase__ = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def _a ( UpperCamelCase_ : str ) -> str: """simple docstring""" lowerCAmelCase__ = dict(inspect.signature(config_class.__init__ ).parameters ) lowerCAmelCase__ = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]] lowerCAmelCase__ = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test 
should pass lowerCAmelCase__ = {} if len(config_class.attribute_map ) > 0: lowerCAmelCase__ = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files lowerCAmelCase__ = inspect.getsourcefile(_lowerCamelCase ) lowerCAmelCase__ = os.path.dirname(_lowerCamelCase ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. lowerCAmelCase__ = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for fn in os.listdir(_lowerCamelCase ) if fn.startswith("modeling_" )] # Get the source code strings lowerCAmelCase__ = [] for path in modeling_paths: if os.path.isfile(_lowerCamelCase ): with open(_lowerCamelCase ) as fp: modeling_sources.append(fp.read() ) lowerCAmelCase__ = [] for config_param, default_value in zip(_lowerCamelCase , _lowerCamelCase ): # `attributes` here is all the variant names for `config_param` lowerCAmelCase__ = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): unused_attributes.append(attributes[0] ) return sorted(_lowerCamelCase ) def _a ( ) -> Dict: """simple docstring""" lowerCAmelCase__ = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) lowerCAmelCase__ = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda UpperCamelCase_ : inspect.isclass(_lowerCamelCase ) and issubclass(_lowerCamelCase , _lowerCamelCase ) and inspect.getmodule(_lowerCamelCase ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: lowerCAmelCase__ = check_config_attributes_being_used(_lowerCamelCase ) if len(_lowerCamelCase ) > 0: lowerCAmelCase__ = unused_attributes if len(_lowerCamelCase ) > 0: lowerCAmelCase__ = "The following configuration classes contain unused attributes in the corresponding modeling files:\n" for name, attributes in configs_with_unused_attributes.items(): error += F"{name}: {attributes}\n" raise ValueError(_lowerCamelCase ) if __name__ == "__main__": check_config_attributes()
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
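# Hedged usage note (assumption: `--make-reports` is the option that
# `pytest_addoption_shared` registers): running, for example,
#
#     pytest --make-reports=tests tests/
#
# makes the terminal-summary hook above write per-run report files via
# `pytest_terminal_summary_main`.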
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
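# Usage sketch (illustrative): these helpers raise early with a readable
# message instead of failing deep inside the library. Compound version ranges
# are comma-separated:
#
#     require_version("tokenizers>=0.11.1,!=0.11.3,<0.14", hint="pip install tokenizers")
#     require_version_core("datasets>=1.14.0")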
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
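# Sanity check (assumption: small bound chosen for a hand-verifiable result):
# the primes below 10 are 2, 3, 5 and 7, so solution(10) should be 17.
#
#     assert solution(10) == 17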
import argparse from collections import defaultdict import yaml _snake_case = "docs/source/en/_toctree.yml" def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Dict = defaultdict(_lowerCamelCase ) _lowerCAmelCase : Any = [] _lowerCAmelCase : List[str] = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"local": doc["local"], "title": doc["title"]} ) else: new_doc_list.append(_lowerCamelCase ) _lowerCAmelCase : Optional[Any] = new_doc_list _lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1] _lowerCAmelCase : str = [] for duplicate_key in duplicates: _lowerCAmelCase : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} ) if len(_lowerCamelCase ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " "others." ) # Only add this once new_doc.append({"local": duplicate_key, "title": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] ) _lowerCAmelCase : Optional[Any] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(_lowerCamelCase ) > 1: raise ValueError("{doc_list} has two 'overview' docs which is not allowed." ) overview_doc.extend(_lowerCamelCase ) # Sort return overview_doc def A ( _lowerCamelCase=False ): '''simple docstring''' with open(_lowerCamelCase , encoding="utf-8" ) as f: _lowerCAmelCase : int = yaml.safe_load(f.read() ) # Get to the API doc _lowerCAmelCase : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCAmelCase : List[str] = content[api_idx]["sections"] # Then to the model doc _lowerCAmelCase : Union[str, Any] = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _lowerCAmelCase : Optional[Any] = api_doc[scheduler_idx]["sections"] _lowerCAmelCase : Optional[Any] = clean_doc_toc(_lowerCamelCase ) _lowerCAmelCase : int = False if new_scheduler_doc != scheduler_doc: _lowerCAmelCase : List[Any] = True if overwrite: _lowerCAmelCase : Dict = new_scheduler_doc if diff: if overwrite: _lowerCAmelCase : Tuple = api_doc with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." 
) def A ( _lowerCamelCase=False ): '''simple docstring''' with open(_lowerCamelCase , encoding="utf-8" ) as f: _lowerCAmelCase : Tuple = yaml.safe_load(f.read() ) # Get to the API doc _lowerCAmelCase : Optional[int] = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCAmelCase : int = content[api_idx]["sections"] # Then to the model doc _lowerCAmelCase : List[str] = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _lowerCAmelCase : Dict = False _lowerCAmelCase : Optional[int] = api_doc[pipeline_idx]["sections"] _lowerCAmelCase : Tuple = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _lowerCAmelCase : List[Any] = pipeline_doc["section"] _lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase ) if overwrite: _lowerCAmelCase : Optional[Any] = new_sub_pipeline_doc new_pipeline_docs.append(_lowerCamelCase ) # sort overall pipeline doc _lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase ) if new_pipeline_docs != pipeline_docs: _lowerCAmelCase : Dict = True if overwrite: _lowerCAmelCase : Optional[int] = new_pipeline_docs if diff: if overwrite: _lowerCAmelCase : Optional[int] = api_doc with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) ) else: raise ValueError( "The model doc part of the table of content is not properly sorted, run `make style` to fix this." ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") _snake_case = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __snake_case ( lowerCAmelCase ): _a : str= (CMStochasticIterativeScheduler,) _a : Optional[Any]= 10 def _SCREAMING_SNAKE_CASE ( self ,**snake_case ): '''simple docstring''' lowercase : Union[str, Any] = { "num_train_timesteps": 201, "sigma_min": 0.002, "sigma_max": 80.0, } config.update(**__a ) return config def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Tuple = 10 lowercase : int = self.get_scheduler_config() lowercase : Optional[int] = self.scheduler_classes[0](**__a ) scheduler.set_timesteps(__a ) lowercase : str = scheduler.timesteps[0] lowercase : Optional[Any] = scheduler.timesteps[1] lowercase : Optional[Any] = self.dummy_sample lowercase : Optional[int] = 0.1 * sample lowercase : Union[str, Any] = scheduler.step(__a ,__a ,__a ).prev_sample lowercase : Any = scheduler.step(__a ,__a ,__a ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[str] = self.scheduler_classes[0] lowercase : Optional[Any] = self.get_scheduler_config() lowercase : List[Any] = scheduler_class(**__a ) lowercase : Union[str, Any] = 1 scheduler.set_timesteps(__a ) lowercase : Any = scheduler.timesteps lowercase : List[str] = torch.manual_seed(0 ) lowercase : str = self.dummy_model() lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(__a ): # 1. scale model input lowercase : Dict = scheduler.scale_model_input(__a ,__a ) # 2. predict noise residual lowercase : Union[str, Any] = model(__a ,__a ) # 3. predict previous sample x_t-1 lowercase : Tuple = scheduler.step(__a ,__a ,__a ,generator=__a ).prev_sample lowercase : List[Any] = pred_prev_sample lowercase : Optional[Any] = torch.sum(torch.abs(__a ) ) lowercase : List[str] = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Dict = self.scheduler_classes[0] lowercase : Dict = self.get_scheduler_config() lowercase : Tuple = scheduler_class(**__a ) lowercase : Dict = [106, 0] scheduler.set_timesteps(timesteps=__a ) lowercase : Dict = scheduler.timesteps lowercase : str = torch.manual_seed(0 ) lowercase : Tuple = self.dummy_model() lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowercase : Tuple = scheduler.scale_model_input(__a ,__a ) # 2. predict noise residual lowercase : List[str] = model(__a ,__a ) # 3. 
predict previous sample x_t-1 lowercase : str = scheduler.step(__a ,__a ,__a ,generator=__a ).prev_sample lowercase : Tuple = pred_prev_sample lowercase : List[Any] = torch.sum(torch.abs(__a ) ) lowercase : Tuple = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[Any] = self.scheduler_classes[0] lowercase : List[str] = self.get_scheduler_config() lowercase : Union[str, Any] = scheduler_class(**__a ) lowercase : Tuple = [39, 30, 12, 15, 0] with self.assertRaises(__a ,msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : str = self.scheduler_classes[0] lowercase : Optional[int] = self.get_scheduler_config() lowercase : List[str] = scheduler_class(**__a ) lowercase : Optional[int] = [39, 30, 12, 1, 0] lowercase : Dict = len(__a ) with self.assertRaises(__a ,msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=__a ,timesteps=__a ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = self.scheduler_classes[0] lowercase : Optional[Any] = self.get_scheduler_config() lowercase : Dict = scheduler_class(**__a ) lowercase : Any = [scheduler.config.num_train_timesteps] with self.assertRaises( __a ,msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" ,): scheduler.set_timesteps(timesteps=__a )
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Compute the speed of sound in a fluid as sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
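# Worked example (approximate textbook values, chosen here for illustration):
# water has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa, giving
# sqrt(2.15e9 / 998) ~= 1467.8 m/s, close to the measured ~1480 m/s.
#
#     speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)  # ~1467.8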
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str]=1_3 , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Union[str, Any]=9_9 , UpperCAmelCase__ : Any=3_2 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=5_1_2 , UpperCAmelCase__ : Union[str, Any]=1_6 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=4 , ) -> int: __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_choices def UpperCAmelCase_ ( self : str ) -> Tuple: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_attention_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__a , ) return config, input_ids, attention_mask def UpperCAmelCase_ ( self : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : Dict = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, 
FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: __SCREAMING_SNAKE_CASE = FlaxDistilBertModelTester(self ) @slow def UpperCAmelCase_ ( self : Any ) -> List[str]: for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("distilbert-base-uncased" ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) ) self.assertIsNotNone(__a ) @require_flax class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" ) __SCREAMING_SNAKE_CASE = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __SCREAMING_SNAKE_CASE = model(__a , attention_mask=__a )[0] __SCREAMING_SNAKE_CASE = (1, 1_1, 7_6_8) self.assertEqual(output.shape , __a ) __SCREAMING_SNAKE_CASE = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging _snake_case = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class UpperCAmelCase_ ( a): def __init__( self, __a = 101): '''simple docstring''' _lowerCAmelCase : str = length def __len__( self): '''simple docstring''' return self.length def __getitem__( self, __a): '''simple docstring''' return i class UpperCAmelCase_ : def __call__( self, __a): '''simple docstring''' return {"input_ids": torch.tensor(__a), "labels": torch.tensor(__a)} class UpperCAmelCase_ ( nn.Module): def __init__( self): '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. _lowerCAmelCase : str = nn.Linear(120, 80) def snake_case__ ( self, __a, __a=None): '''simple docstring''' if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids class UpperCAmelCase_ ( a): @require_torch_neuroncore def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() _lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir() _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split() _lowerCAmelCase : List[Any] = ["torchrun"] + distributed_args + args execute_subprocess_async(__a, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call class UpperCAmelCase_ ( a): @require_torch_multi_gpu def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split() _lowerCAmelCase : Any = self.get_auto_remove_tmp_dir() _lowerCAmelCase : Optional[int] = f"--output_dir {output_dir}".split() _lowerCAmelCase : Any = ["torchrun"] + distributed_args + args execute_subprocess_async(__a, env=self.get_env()) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py _snake_case = HfArgumentParser((TrainingArguments,)) _snake_case = parser.parse_args_into_dataclasses()[0] logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [101, 40, 7]: _snake_case = DummyDataset(dataset_length) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Dict = list(range(len(_lowerCamelCase ) ) ) _lowerCAmelCase : Union[str, Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( "Predictions and/or labels do not match expected results:\n - predictions: " F"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" ) return {"success": success} _snake_case = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) _snake_case = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _snake_case = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _snake_case = 2 _snake_case = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) _snake_case = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) _snake_case = None
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : str = """trocr""" a_ : str = ["""past_key_values"""] a_ : Optional[Any] = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self : Union[str, Any] , a_ : List[str]=5_02_65 , a_ : Tuple=10_24 , a_ : str=12 , a_ : Tuple=16 , a_ : Tuple=40_96 , a_ : Union[str, Any]="gelu" , a_ : Union[str, Any]=5_12 , a_ : List[str]=0.1 , a_ : Dict=0.0 , a_ : List[str]=0.0 , a_ : Any=2 , a_ : Any=0.02 , a_ : int=0.0 , a_ : Optional[Any]=True , a_ : Any=False , a_ : Any=True , a_ : Optional[Any]=True , a_ : List[Any]=1 , a_ : str=0 , a_ : List[Any]=2 , **a_ : str , ): lowerCAmelCase_ : List[Any] = vocab_size lowerCAmelCase_ : Dict = d_model lowerCAmelCase_ : str = decoder_layers lowerCAmelCase_ : Dict = decoder_attention_heads lowerCAmelCase_ : int = decoder_ffn_dim lowerCAmelCase_ : int = activation_function lowerCAmelCase_ : str = max_position_embeddings lowerCAmelCase_ : Optional[Any] = dropout lowerCAmelCase_ : str = attention_dropout lowerCAmelCase_ : str = activation_dropout lowerCAmelCase_ : List[Any] = init_std lowerCAmelCase_ : Optional[Any] = decoder_layerdrop lowerCAmelCase_ : Optional[int] = use_cache lowerCAmelCase_ : str = scale_embedding lowerCAmelCase_ : Optional[int] = use_learned_position_embeddings lowerCAmelCase_ : Optional[Any] = layernorm_embedding super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
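# Usage sketch (illustrative): all the search variants agree on a sorted list.
#
#     data = [0, 5, 7, 10, 15]
#     binary_search(data, 15)                                # -> 4
#     binary_search_std_lib(data, 5)                         # -> 1
#     binary_search_by_recursion(data, 7, 0, len(data) - 1)  # -> 2
#     binary_search(data, 6)                                 # -> None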
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n: int, prec: int = 1000) -> bool:
    """Miller-Rabin primality test with `prec` random witnesses.
    See: https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
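# Usage sketch (illustrative): the test is probabilistic - a "composite"
# answer is always correct, while "prime" is wrong with probability at most
# 4**-prec per call.
#
#     is_prime_big(97)   # -> True
#     is_prime_big(100)  # -> False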
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter by filtering a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter by filtering a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
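# Usage sketch with a hypothetical pass-through filter satisfying the
# FilterType protocol (a real IIR filter would keep state across samples).
# Passing the impulse through unchanged yields a flat 0 dB magnitude and a
# zero phase response.

class Passthrough:
    def process(self, sample: float) -> float:
        # identity filter: y[n] = x[n]
        return sample


if __name__ == "__main__":
    show_frequency_response(Passthrough(), samplerate=48000)
    show_phase_response(Passthrough(), samplerate=48000)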
"""simple docstring""" import math from collections.abc import Callable def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" A__ = xa A__ = xa while True: if x_n == x_na or function(_lowerCamelCase ) == function(_lowerCamelCase ): raise ZeroDivisionError('float division by zero, could not find root' ) A__ = x_na - ( function(_lowerCamelCase ) / ((function(_lowerCamelCase ) - function(_lowerCamelCase )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na A__ = x_na A__ = x_na def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" return math.pow(_lowerCamelCase , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
from __future__ import annotations


def gray_code(bit_count: int) -> list[int]:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list[str]:
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2**n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
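# Quick check (illustrative): the 2-bit Gray code visits each value once and
# consecutive entries differ in exactly one bit.
#
#     gray_code(2)  # -> [0, 1, 3, 2]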
36
0
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def UpperCamelCase ( snake_case__ : int , snake_case__ : Dict , snake_case__ : Dict = 16000 ) -> Optional[Any]: UpperCamelCase : List[str] = int(round(sample_rate * max_length ) ) if len(_lowerCamelCase ) <= sample_length: return wav UpperCamelCase : Tuple = randint(0 , len(_lowerCamelCase ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class lowerCAmelCase_ : UpperCAmelCase__ : str = field(default=a__ , metadata={"help": "Name of a dataset from the datasets package"} ) UpperCAmelCase__ : int = field( default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) UpperCAmelCase__ : Optional[int] = field( default=a__ , metadata={"help": "A file containing the training audio paths and labels."} ) UpperCAmelCase__ : Tuple = field( default=a__ , metadata={"help": "A file containing the validation audio paths and labels."} ) UpperCAmelCase__ : Any = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to \'train\'" } , ) UpperCAmelCase__ : Any = field( default="validation" , metadata={ "help": ( "The name of the training data set split to use (via the datasets library). Defaults to \'validation\'" ) } , ) UpperCAmelCase__ : Any = field( default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to \'audio\'"} , ) UpperCAmelCase__ : Union[str, Any] = field( default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to \'label\'"} ) UpperCAmelCase__ : int = field( default=a__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) UpperCAmelCase__ : Tuple = field( default=a__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) UpperCAmelCase__ : str = field( default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , ) @dataclass class lowerCAmelCase_ : UpperCAmelCase__ : int = field( default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) UpperCAmelCase__ : Dict = field( default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase__ : Union[str, Any] = field( default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} ) UpperCAmelCase__ : Optional[Any] = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) UpperCAmelCase__ : List[str] = field( default=a__ , metadata={"help": "Name or path of preprocessor config."} ) UpperCAmelCase__ : List[str] = field( default=a__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) UpperCAmelCase__ : Union[str, Any] = field( default=a__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} ) UpperCAmelCase__ : List[Any] = field( default=a__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) UpperCAmelCase__ : str = field( default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) UpperCAmelCase__ : Tuple = field( default=a__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def snake_case_ ( self ) -> List[Any]: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( 'The argument `--freeze_feature_extractor` is deprecated and ' 'will be removed in a future version. Use `--freeze_feature_encoder`' 'instead. Setting `freeze_feature_encoder==True`.', __a, ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( 'The argument `--freeze_feature_extractor` is deprecated and ' 'should not be used in combination with `--freeze_feature_encoder`.' 'Only make use of `--freeze_feature_encoder`.' ) def UpperCamelCase ( ) -> str: UpperCamelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase : List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_audio_classification' , _lowerCamelCase , _lowerCamelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() UpperCamelCase : Tuple = training_args.get_process_log_level() logger.setLevel(_lowerCamelCase ) transformers.utils.logging.set_verbosity(_lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. UpperCamelCase : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCamelCase : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to train from scratch.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset and prepare it for the audio classification task. UpperCamelCase : Tuple = DatasetDict() UpperCamelCase : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase : List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ 'Make sure to set `--audio_column_name` to the correct audio column - one of ' F"""{", ".join(raw_datasets["train"].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """ 'Make sure to set `--label_column_name` to the correct text column - one of ' F"""{", ".join(raw_datasets["train"].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy UpperCamelCase : Any = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case__ : List[Any] ): UpperCamelCase : int = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_lowerCamelCase , references=eval_pred.label_ids ) UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel=_lowerCamelCase , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) UpperCamelCase : Optional[int] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: UpperCamelCase : Union[str, Any] = ( raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase ) if training_args.do_eval: if data_args.max_eval_samples is not None: UpperCamelCase : int = ( raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_lowerCamelCase , output_all_columns=_lowerCamelCase ) # Initialize our trainer UpperCamelCase : Optional[Any] = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=_lowerCamelCase , tokenizer=_lowerCamelCase , ) # Training if training_args.do_train: UpperCamelCase : Any = None if training_args.resume_from_checkpoint is not None: UpperCamelCase : Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCamelCase : Union[str, Any] = last_checkpoint UpperCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=_lowerCamelCase ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: UpperCamelCase : Dict = trainer.evaluate() trainer.log_metrics('eval' , _lowerCamelCase ) trainer.save_metrics('eval' , _lowerCamelCase ) # Write model card and (optionally) push to hub UpperCamelCase : int = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCamelCase ) else: trainer.create_model_card(**_lowerCamelCase ) if __name__ == "__main__": main()
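As a self-contained sketch of the cropping logic `random_subsample` applies during training (a synthetic array stands in for the decoded audio; 16 kHz is the usual wav2vec2 rate):

import numpy as np
from random import randint

wav = np.zeros(16000 * 30)                 # 30 s of stand-in audio at 16 kHz
sample_length = int(round(16000 * 20.0))   # max_length_seconds worth of samples
random_offset = randint(0, len(wav) - sample_length - 1)
clip = wav[random_offset : random_offset + sample_length]
print(len(clip) / 16000)  # 20.0 -- every training clip is cut to the max length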
119
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale image, using the mean pixel value as the threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
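A hedged demo on a tiny synthetic gradient instead of a file on disk (the mean of the values 0, 16, ..., 240 is 120, so pixels above it flip to white and the rest to black):

img = Image.new("L", (4, 4))
img.putdata(list(range(0, 256, 16)))  # 16 pixels: 0, 16, ..., 240
out = mean_threshold(img)
print(sorted(set(out.getdata())))  # [0, 255] -- fully binarized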
36
0
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def snake_case ( ): UpperCAmelCase_ : Optional[Any] = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" UpperCAmelCase_ : List[str] = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw ).convert("RGB" ) return image def snake_case ( A__ ): UpperCAmelCase_ : Optional[int] = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def snake_case ( A__ ,A__ 
,A__ ): UpperCAmelCase_ : Any = dct.pop(_lowerCamelCase ) UpperCAmelCase_ : Optional[int] = val def snake_case ( A__ ,A__ ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases UpperCAmelCase_ : Tuple = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) UpperCAmelCase_ : Optional[int] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict UpperCAmelCase_ : str = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase ,requires_grad=_lowerCamelCase ), v_bias) ) UpperCAmelCase_ : Any = qkv_bias def snake_case ( A__ ): UpperCAmelCase_ : List[str] = 3_64 if "coco" in model_name else 2_24 UpperCAmelCase_ : Dict = InstructBlipVisionConfig(image_size=_lowerCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: UpperCAmelCase_ : Any = TaConfig.from_pretrained("google/flan-t5-xl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: UpperCAmelCase_ : int = TaConfig.from_pretrained("google/flan-t5-xxl" ,dense_act_fn="gelu" ,bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: UpperCAmelCase_ : Union[str, Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" ,vocab_size=3_20_01 ).to_dict() elif "vicuna-13b" in model_name: UpperCAmelCase_ : Any = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" ,vocab_size=3_20_01 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 UpperCAmelCase_ : int = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict() UpperCAmelCase_ : str = InstructBlipConfig(vision_config=_lowerCamelCase ,text_config=_lowerCamelCase ,qformer_config=_lowerCamelCase ) return config, image_size @torch.no_grad() def snake_case ( A__ ,A__=None ,A__=False ): UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained("bert-base-uncased" ,truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: UpperCAmelCase_ : Any = TaTokenizerFast.from_pretrained("google/flan-t5-xl" ,truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) UpperCAmelCase_ : str = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" ,truncation_side="left" ,bos_token="</s>" ,unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) UpperCAmelCase_ : Optional[int] = get_blipa_config(_lowerCamelCase ) UpperCAmelCase_ : Optional[int] = InstructBlipForConditionalGeneration(_lowerCamelCase ).eval() UpperCAmelCase_ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name] # load original model print("Loading original model..." 
) UpperCAmelCase_ : Optional[int] = "cuda:1" if torch.cuda.is_available() else "cpu" UpperCAmelCase_ : Optional[int] = "cuda:2" if torch.cuda.is_available() else "cpu" UpperCAmelCase_ : int = load_model_and_preprocess( name=_lowerCamelCase ,model_type=_lowerCamelCase ,is_eval=_lowerCamelCase ,device=_lowerCamelCase ) original_model.eval() print("Done!" ) # update state dict keys UpperCAmelCase_ : Union[str, Any] = original_model.state_dict() UpperCAmelCase_ : Optional[Any] = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): UpperCAmelCase_ : Optional[int] = state_dict.pop(_lowerCamelCase ) if key.startswith("Qformer.bert" ): UpperCAmelCase_ : List[Any] = key.replace("Qformer.bert" ,"qformer" ) if "attention.self" in key: UpperCAmelCase_ : Optional[int] = key.replace("self" ,"attention" ) if "llm_proj" in key: UpperCAmelCase_ : Union[str, Any] = key.replace("llm_proj" ,"language_projection" ) if "t5_proj" in key: UpperCAmelCase_ : List[str] = key.replace("t5_proj" ,"language_projection" ) if key.startswith("llm_model" ): UpperCAmelCase_ : Tuple = key.replace("llm_model" ,"language_model" ) if key.startswith("t5" ): UpperCAmelCase_ : Optional[Any] = key.replace("t5" ,"language" ) UpperCAmelCase_ : Optional[Any] = val # read in qv biases read_in_q_v_bias(_lowerCamelCase ,_lowerCamelCase ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(_lowerCamelCase ,strict=_lowerCamelCase ) UpperCAmelCase_ : int = load_demo_image() UpperCAmelCase_ : Optional[int] = "What is unusual about this image?" # create processor UpperCAmelCase_ : Union[str, Any] = BlipImageProcessor( size={"height": image_size, "width": image_size} ,image_mean=_lowerCamelCase ,image_std=_lowerCamelCase ) UpperCAmelCase_ : List[str] = InstructBlipProcessor( image_processor=_lowerCamelCase ,tokenizer=_lowerCamelCase ,qformer_tokenizer=_lowerCamelCase ,) UpperCAmelCase_ : int = processor(images=_lowerCamelCase ,text=_lowerCamelCase ,return_tensors="pt" ).to(_lowerCamelCase ) # make sure processor creates exact same pixel values UpperCAmelCase_ : Tuple = vis_processors["eval"](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase ) UpperCAmelCase_ : Any = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,_lowerCamelCase ) original_model.to(_lowerCamelCase ) hf_model.to(_lowerCamelCase ) with torch.no_grad(): if "vicuna" in model_name: UpperCAmelCase_ : List[Any] = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits UpperCAmelCase_ : Union[str, Any] = hf_model(**_lowerCamelCase ).logits else: UpperCAmelCase_ : Optional[int] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits UpperCAmelCase_ : List[str] = tokenizer("\n" ,return_tensors="pt" ).input_ids.to(_lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-1_00 ) UpperCAmelCase_ : Tuple = hf_model(**_lowerCamelCase ,labels=_lowerCamelCase ).logits print("First values of original logits:" ,original_logits[0, :3, :3] ) print("First values of HF logits:" ,logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape UpperCAmelCase_ : Dict = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) ,_lowerCamelCase ,atol=_lowerCamelCase ) print("Looks 
ok!" ) print("Generating with original model..." ) UpperCAmelCase_ : Dict = original_model.generate({"image": original_pixel_values, "prompt": prompt} ,num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) UpperCAmelCase_ : Any = hf_model.generate( **_lowerCamelCase ,do_sample=_lowerCamelCase ,num_beams=5 ,max_length=2_56 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? UpperCAmelCase_ : List[str] = 2 print("Original generation:" ,_lowerCamelCase ) UpperCAmelCase_ : Optional[int] = processor.batch_decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = [text.strip() for text in output_text] print("HF generation:" ,_lowerCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if push_to_hub: processor.push_to_hub(F"""Salesforce/{model_name}""" ) hf_model.push_to_hub(F"""Salesforce/{model_name}""" ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() lowerCamelCase_ = [ '''instructblip-vicuna-7b''', '''instructblip-vicuna-13b''', '''instructblip-flan-t5-xl''', '''instructblip-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''instructblip-flan-t5-xl''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) lowerCamelCase_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
268
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class UpperCAmelCase_ ( a): lowerCamelCase__ = 'wav2vec2' def __init__( self, __a=32, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=0.1, __a=0.0, __a=0.0, __a=0.1, __a=0.1, __a=0.02, __a=1E-5, __a="group", __a="gelu", __a=(512, 512, 512, 512, 512, 512, 512), __a=(5, 2, 2, 2, 2, 2, 2), __a=(10, 3, 3, 3, 3, 2, 2), __a=False, __a=128, __a=16, __a=False, __a=True, __a=0.05, __a=10, __a=2, __a=0.0, __a=10, __a=0, __a=320, __a=2, __a=0.1, __a=100, __a=256, __a=256, __a=0.1, __a="sum", __a=False, __a=False, __a=256, __a=(512, 512, 512, 512, 1500), __a=(5, 3, 3, 1, 1), __a=(1, 2, 3, 1, 1), __a=512, __a=0, __a=1, __a=2, __a=False, __a=3, __a=2, __a=3, __a=None, __a=None, **__a, ): '''simple docstring''' super().__init__(**__a, pad_token_id=__a, bos_token_id=__a, eos_token_id=__a) _lowerCAmelCase : str = hidden_size _lowerCAmelCase : Optional[int] = feat_extract_norm _lowerCAmelCase : Union[str, Any] = feat_extract_activation _lowerCAmelCase : Optional[Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : str = list(__a) _lowerCAmelCase : List[str] = conv_bias _lowerCAmelCase : str = num_conv_pos_embeddings _lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups _lowerCAmelCase : str = len(self.conv_dim) _lowerCAmelCase : List[str] = num_hidden_layers _lowerCAmelCase : str = intermediate_size _lowerCAmelCase : Any = hidden_act _lowerCAmelCase : int = num_attention_heads _lowerCAmelCase : Optional[Any] = hidden_dropout _lowerCAmelCase : List[str] = attention_dropout _lowerCAmelCase : Tuple = activation_dropout _lowerCAmelCase : int = feat_proj_dropout _lowerCAmelCase : List[str] = final_dropout _lowerCAmelCase : int = layerdrop _lowerCAmelCase : int = layer_norm_eps _lowerCAmelCase : Union[str, Any] = initializer_range _lowerCAmelCase : str = vocab_size _lowerCAmelCase : Optional[Any] = do_stable_layer_norm _lowerCAmelCase : Any = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCAmelCase : str = apply_spec_augment _lowerCAmelCase : Optional[Any] = mask_time_prob _lowerCAmelCase : Optional[int] = mask_time_length _lowerCAmelCase : List[str] = mask_time_min_masks _lowerCAmelCase : Optional[int] = mask_feature_prob _lowerCAmelCase : Optional[int] = mask_feature_length _lowerCAmelCase : List[str] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCAmelCase : Union[str, Any] = num_codevectors_per_group _lowerCAmelCase : str = num_codevector_groups _lowerCAmelCase : Optional[int] = contrastive_logits_temperature _lowerCAmelCase : Optional[int] = feat_quantizer_dropout _lowerCAmelCase : Optional[int] = num_negatives _lowerCAmelCase : Union[str, Any] = codevector_dim _lowerCAmelCase : Any = proj_codevector_dim _lowerCAmelCase : Optional[int] = diversity_loss_weight # ctc loss _lowerCAmelCase : Tuple = ctc_loss_reduction _lowerCAmelCase : Tuple = ctc_zero_infinity # adapter _lowerCAmelCase : List[Any] = add_adapter _lowerCAmelCase : List[str] = adapter_kernel_size _lowerCAmelCase : str = adapter_stride _lowerCAmelCase : List[str] = num_adapter_layers _lowerCAmelCase : str = output_hidden_size or hidden_size _lowerCAmelCase : Tuple = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCAmelCase : str = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCAmelCase : str = list(__a) _lowerCAmelCase : Union[str, Any] = list(__a) _lowerCAmelCase : List[str] = list(__a) _lowerCAmelCase : Tuple = xvector_output_dim @property def snake_case__ ( self): '''simple docstring''' return functools.reduce(operator.mul, self.conv_stride, 1)
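The closing property multiplies the conv strides together, i.e. the total downsampling factor from raw samples to feature frames. With the defaults above, a hedged sanity check:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default strides above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 input samples per frame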
36
0
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets __A = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" __A = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" __A = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): """simple docstring""" def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=False ) -> List[str]: '''simple docstring''' if rouge_types is None: __lowerCamelCase = ["rouge1", "rouge2", "rougeL", "rougeLsum"] __lowerCamelCase = rouge_scorer.RougeScorer(rouge_types=__a , use_stemmer=__a ) if use_aggregator: __lowerCamelCase = scoring.BootstrapAggregator() else: __lowerCamelCase = [] for ref, pred in zip(__a , __a ): __lowerCamelCase = scorer.score(__a , __a ) if use_aggregator: aggregator.add_scores(__a ) else: scores.append(__a ) if use_aggregator: __lowerCamelCase = aggregator.aggregate() else: __lowerCamelCase = {} for key in scores[0]: __lowerCamelCase = [score[key] for score in scores] return result
90
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( 'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[Any] = RobertaEmbeddings(__a) self.init_weights() @add_start_docstrings( 'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , a , ) class UpperCAmelCase_ ( a): lowerCamelCase__ = RobertaConfig lowerCamelCase__ = 'roberta' def __init__( self, __a): '''simple docstring''' super().__init__(__a) _lowerCAmelCase : Optional[int] = config.num_labels _lowerCAmelCase : Optional[int] = config.num_hidden_layers _lowerCAmelCase : Optional[int] = DeeRobertaModel(__a) _lowerCAmelCase : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob) _lowerCAmelCase : List[str] = nn.Linear(config.hidden_size, self.config.num_labels) @add_start_docstrings_to_model_forward(__a) def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=-1, __a=False, ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.num_layers try: _lowerCAmelCase : List[Any] = self.roberta( __a, attention_mask=__a, token_type_ids=__a, position_ids=__a, head_mask=__a, inputs_embeds=__a, ) _lowerCAmelCase : List[Any] = outputs[1] _lowerCAmelCase : Dict = self.dropout(__a) _lowerCAmelCase : Dict = self.classifier(__a) _lowerCAmelCase : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _lowerCAmelCase : Tuple = e.message _lowerCAmelCase : Union[str, Any] = e.exit_layer _lowerCAmelCase : List[Any] = outputs[0] if not self.training: _lowerCAmelCase : int = entropy(__a) _lowerCAmelCase : List[Any] = [] _lowerCAmelCase : str = [] if labels is not None: if self.num_labels == 1: # We are doing regression _lowerCAmelCase : Optional[Any] = MSELoss() _lowerCAmelCase : int = loss_fct(logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Optional[Any] = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) # work with highway exits _lowerCAmelCase : Optional[int] = [] for highway_exit in outputs[-1]: _lowerCAmelCase : Any = highway_exit[0] if not self.training: highway_logits_all.append(__a) highway_entropy.append(highway_exit[2]) if self.num_labels == 1: # We are doing regression _lowerCAmelCase : List[str] = MSELoss() _lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1), labels.view(-1)) else: _lowerCAmelCase : Dict = CrossEntropyLoss() _lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1)) highway_losses.append(__a) if train_highway: _lowerCAmelCase : int = (sum(highway_losses[:-1]),) + outputs # exclude the final highway, of course else: _lowerCAmelCase : Any = (loss,) + outputs if not self.training: _lowerCAmelCase : Optional[Any] = outputs + ((original_entropy, 
highway_entropy), exit_layer) if output_layer >= 0: _lowerCAmelCase : Optional[Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
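The exit decision above rests on the `entropy` helper imported from `modeling_highway_bert`; a hedged re-derivation of what such a helper computes over a row of logits (low entropy means the highway head is confident enough to exit):

import torch


def softmax_entropy(logits: torch.Tensor) -> torch.Tensor:
    # H = -sum(p * log p) over the class dimension
    probs = torch.softmax(logits, dim=-1)
    return -(probs * probs.log()).sum(dim=-1)


print(softmax_entropy(torch.tensor([[10.0, 0.0, 0.0]])))  # low: exit early
print(softmax_entropy(torch.tensor([[1.0, 1.0, 1.0]])))   # high: keep computing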
36
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
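A quick usage sketch of the two classes (the `OnnxConfig` constructor signature is assumed from the transformers base class):

config = CamembertConfig()
print(config.hidden_size, config.num_hidden_layers)  # 768 12

onnx_config = CamembertOnnxConfig(config, task="multiple-choice")
print(onnx_config.inputs["input_ids"])  # {0: 'batch', 1: 'choice', 2: 'sequence'}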
340
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType _snake_case = logging.get_logger(__name__) class UpperCAmelCase_ ( a): lowerCamelCase__ = 'vision-encoder-decoder' lowerCamelCase__ = True def __init__( self, **__a): '''simple docstring''' super().__init__(**__a) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"A configuraton of type {self.model_type} cannot be instantiated because " f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}") _lowerCAmelCase : str = kwargs.pop("encoder") _lowerCAmelCase : Any = encoder_config.pop("model_type") _lowerCAmelCase : str = kwargs.pop("decoder") _lowerCAmelCase : List[str] = decoder_config.pop("model_type") _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a) _lowerCAmelCase : Optional[Any] = AutoConfig.for_model(__a, **__a) _lowerCAmelCase : Optional[int] = True @classmethod def snake_case__ ( cls, __a, __a, **__a): '''simple docstring''' logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config") _lowerCAmelCase : Optional[Any] = True _lowerCAmelCase : str = True return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = copy.deepcopy(self.__dict__) _lowerCAmelCase : List[str] = self.encoder.to_dict() _lowerCAmelCase : List[str] = self.decoder.to_dict() _lowerCAmelCase : Any = self.__class__.model_type return output class UpperCAmelCase_ ( a): lowerCamelCase__ = version.parse('1.11') @property def snake_case__ ( self): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ]) @property def snake_case__ ( self): '''simple docstring''' return 1E-4 @property def snake_case__ ( self): '''simple docstring''' return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}}) class UpperCAmelCase_ ( a): @property def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = OrderedDict() _lowerCAmelCase : Any = {0: "batch", 1: "past_decoder_sequence + sequence"} _lowerCAmelCase : List[str] = {0: "batch", 1: "past_decoder_sequence + sequence"} _lowerCAmelCase : Optional[Any] = {0: "batch", 1: "encoder_sequence"} return common_inputs def snake_case__ ( self, __a, __a = -1, __a = -1, __a = False, __a = None, ): '''simple docstring''' import torch _lowerCAmelCase : Optional[Any] = OrderedDict() _lowerCAmelCase : List[str] = super().generate_dummy_inputs( __a, batch_size=__a, seq_length=__a, is_pair=__a, framework=__a) _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = dummy_input["input_ids"].shape _lowerCAmelCase : str = (batch, encoder_sequence, self._config.encoder_hidden_size) _lowerCAmelCase : List[str] = dummy_input.pop("input_ids") _lowerCAmelCase : List[str] = dummy_input.pop("attention_mask") _lowerCAmelCase : Optional[int] = torch.zeros(__a) return common_inputs class UpperCAmelCase_ ( a): @property def snake_case__ ( self): '''simple docstring''' pass def snake_case__ ( self, __a): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(__a) def snake_case__ ( self, __a, __a, __a = "default"): '''simple docstring''' _lowerCAmelCase : Dict = 
encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__a, __a)
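In practice the composite config is built through the classmethod defined above; a hedged sketch pairing an arbitrary vision encoder with an arbitrary text decoder:

from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True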
36
0
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class A_ ( unittest.TestCase ): def _lowerCAmelCase (self :Union[str, Any] )-> int: __A = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() __A = dict(zip(__a , range(len(__a ) ) ) ) __A = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } __A = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 1_6000, "return_attention_mask": False, "do_normalize": True, } __A = tempfile.mkdtemp() __A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __A = os.path.join(self.tmpdirname , __a ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__a ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__a ) + '''\n''' ) # load decoder from hub __A = "hf-internal-testing/ngram-beam-search-decoder" def _lowerCAmelCase (self :str , **_UpperCamelCase :Tuple )-> Dict: __A = self.add_kwargs_tokens_map.copy() kwargs.update(__a ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__a ) def _lowerCAmelCase (self :int , **_UpperCamelCase :List[Any] )-> Dict: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__a ) def _lowerCAmelCase (self :int , **_UpperCamelCase :Tuple )-> str: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__a ) def _lowerCAmelCase (self :str )-> List[str]: shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase (self :Union[str, Any] )-> Optional[Any]: __A = self.get_tokenizer() __A = self.get_feature_extractor() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) processor.save_pretrained(self.tmpdirname ) __A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __a ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __a ) def _lowerCAmelCase (self :Tuple )-> List[str]: __A = WavaVecaProcessorWithLM( 
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __A = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def _lowerCAmelCase (self :Dict )-> Optional[Any]: __A = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(__a , '''include''' ): WavaVecaProcessorWithLM( tokenizer=__a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def _lowerCAmelCase (self :Optional[int] )-> List[str]: __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) __A = floats_list((3, 1000) ) __A = feature_extractor(__a , return_tensors='''np''' ) __A = processor(__a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase (self :List[Any] )-> List[str]: __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) __A = "This is a test string" __A = processor(text=__a ) __A = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase (self :Any , _UpperCamelCase :Any=(2, 10, 16) , _UpperCamelCase :List[str]=77 )-> int: np.random.seed(__a ) return np.random.rand(*__a ) def _lowerCAmelCase (self :Optional[Any] )-> Optional[Any]: __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) __A = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __A = processor.decode(__a ) __A = decoder.decode_beams(__a )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def _lowerCAmelCase (self :int , _UpperCamelCase :int )-> Dict: __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) __A = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: __A = processor.batch_decode(__a ) else: with get_context(__a ).Pool() as pool: __A = processor.batch_decode(__a , __a ) __A = list(__a ) with get_context('''fork''' ).Pool() as p: __A = decoder.decode_beams_batch(__a , __a ) __A = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__a , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(__a , decoded_processor.logit_score ) self.assertListEqual(__a , decoded_processor.lm_score ) def _lowerCAmelCase (self :Any )-> List[Any]: __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) __A = self._get_dummy_logits() __A = 15 __A = -2_0.0 __A = -4.0 __A = processor.batch_decode( __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , ) __A = decoded_processor_out.text __A = list(__a ) with get_context('''fork''' ).Pool() as pool: __A = decoder.decode_beams_batch( __a , __a , beam_width=__a , beam_prune_logp=__a , token_min_logp=__a , ) __A = [d[0][0] for d in decoded_decoder_out] __A = [d[0][2] for d in decoded_decoder_out] __A = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__a , __a ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __a ) self.assertTrue(np.array_equal(__a , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , __a , atol=1e-3 ) ) self.assertTrue(np.array_equal(__a , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , __a , atol=1e-3 ) ) def _lowerCAmelCase (self :List[str] )-> Dict: __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) __A = self._get_dummy_logits() __A = 2.0 __A = 5.0 __A = -2_0.0 __A = True __A = processor.batch_decode( __a , alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , ) __A = decoded_processor_out.text __A = list(__a ) decoder.reset_params( alpha=__a , beta=__a , unk_score_offset=__a , lm_score_boundary=__a , ) with get_context('''fork''' ).Pool() as pool: __A = decoder.decode_beams_batch( __a , __a , ) __A = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__a , __a ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __a ) __A = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , __a ) def _lowerCAmelCase (self :Union[str, Any] )-> Tuple: __A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __A = processor.decoder.model_container[processor.decoder._model_key] __A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __A = os.listdir(__a ) __A = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(__a , __a ) def _lowerCAmelCase (self :Optional[int] )-> List[str]: __A = snapshot_download('''hf-internal-testing/processor_with_lm''' ) __A = WavaVecaProcessorWithLM.from_pretrained(__a ) __A = processor.decoder.model_container[processor.decoder._model_key] __A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __A = os.listdir(__a ) __A = os.listdir(__a ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__a , __a ) def _lowerCAmelCase (self :Any )-> List[str]: __A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __A = floats_list((3, 1000) ) __A = processor_wavaveca(__a , return_tensors='''np''' ) __A = processor_auto(__a , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) __A = self._get_dummy_logits() __A = processor_wavaveca.batch_decode(__a ) __A = processor_auto.batch_decode(__a ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def _lowerCAmelCase (self :str )-> Dict: __A = self.get_feature_extractor() __A = self.get_tokenizer() __A = self.get_decoder() __A = WavaVecaProcessorWithLM(tokenizer=__a , feature_extractor=__a , decoder=__a ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def _lowerCAmelCase (_UpperCamelCase :List[str] , _UpperCamelCase :int )-> Optional[int]: __A = [d[key] for d in offsets] return retrieved_list def _lowerCAmelCase (self :Optional[Any] )-> List[str]: __A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __A = self._get_dummy_logits()[0] __A = processor.decode(__a , output_word_offsets=__a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__a , __a ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def _lowerCAmelCase (self :List[str] )-> List[Any]: __A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __A = self._get_dummy_logits() __A = processor.batch_decode(__a , output_word_offsets=__a ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__a , __a ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(__a , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) 
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def _lowerCAmelCase (self :Optional[int] )-> Any: import torch __A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__a ) __A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6000 ) ) __A = iter(__a ) __A = next(__a ) __A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __A = model(__a ).logits.cpu().numpy() __A = processor.decode(logits[0] , output_word_offsets=__a ) __A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __A = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] __A = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(''' '''.join(self.get_from_offsets(__a , '''word''' ) ) , __a ) self.assertEqual(''' '''.join(self.get_from_offsets(__a , '''word''' ) ) , output.text ) # output times __A = torch.tensor(self.get_from_offsets(__a , '''start_time''' ) ) __A = torch.tensor(self.get_from_offsets(__a , '''end_time''' ) ) # fmt: off __A = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) __A = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(__a , __a , atol=0.0_1 ) ) self.assertTrue(torch.allclose(__a , __a , atol=0.0_1 ) )
117
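The decoding tests above finish by multiplying CTC frame offsets with `inputs_to_logits_ratio / sampling_rate` to obtain word timestamps. A minimal standalone sketch of that conversion (the helper name and the sample offsets are ours, not from the test suite):

def offsets_to_timestamps(word_offsets, inputs_to_logits_ratio, sampling_rate):
    # Each logit frame covers `inputs_to_logits_ratio` audio samples, so one
    # frame lasts inputs_to_logits_ratio / sampling_rate seconds.
    time_per_frame = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": o["word"],
            "start_time": o["start_offset"] * time_per_frame,
            "end_time": o["end_offset"] * time_per_frame,
        }
        for o in word_offsets
    ]

# 320 samples per logit frame at 16 kHz gives 20 ms per frame.
print(offsets_to_timestamps([{"word": "hello", "start_offset": 10, "end_offset": 25}], 320, 16_000))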
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class UpperCAmelCase_ ( a): def __get__( self, __a, __a=None): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") _lowerCAmelCase : List[Any] = "__cached_" + self.fget.__name__ _lowerCAmelCase : Dict = getattr(__a, __a, __a) if cached is None: _lowerCAmelCase : str = self.fget(__a) setattr(__a, __a, __a) return cached def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Any = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"invalid truth value {val!r}" ) def A ( _lowerCamelCase ): '''simple docstring''' if is_torch_fx_proxy(_lowerCamelCase ): return True if is_torch_available(): import torch if isinstance(_lowerCamelCase , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(_lowerCamelCase , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer) ): return True return isinstance(_lowerCamelCase , np.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return isinstance(_lowerCamelCase , np.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return _is_numpy(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch return isinstance(_lowerCamelCase , torch.Tensor ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch return isinstance(_lowerCamelCase , torch.device ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_device(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import torch if isinstance(_lowerCamelCase , _lowerCamelCase ): if hasattr(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase ) else: return False return isinstance(_lowerCamelCase , torch.dtype ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import tensorflow as tf return isinstance(_lowerCamelCase , tf.Tensor ) def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(_lowerCamelCase , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(_lowerCamelCase ) return type(_lowerCamelCase ) == tf.Tensor def A ( _lowerCamelCase ): '''simple docstring''' return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' import jax.numpy as jnp # noqa: F811 return isinstance(_lowerCamelCase , jnp.ndarray ) def A ( _lowerCamelCase ): '''simple docstring''' return 
False if not is_flax_available() else _is_jax(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' if isinstance(_lowerCamelCase , (dict, UserDict) ): return {k: to_py_obj(_lowerCamelCase ) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple) ): return [to_py_obj(_lowerCamelCase ) for o in obj] elif is_tf_tensor(_lowerCamelCase ): return obj.numpy().tolist() elif is_torch_tensor(_lowerCamelCase ): return obj.detach().cpu().tolist() elif is_jax_tensor(_lowerCamelCase ): return np.asarray(_lowerCamelCase ).tolist() elif isinstance(_lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def A ( _lowerCamelCase ): '''simple docstring''' if isinstance(_lowerCamelCase , (dict, UserDict) ): return {k: to_numpy(_lowerCamelCase ) for k, v in obj.items()} elif isinstance(_lowerCamelCase , (list, tuple) ): return np.array(_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): return obj.numpy() elif is_torch_tensor(_lowerCamelCase ): return obj.detach().cpu().numpy() elif is_jax_tensor(_lowerCamelCase ): return np.asarray(_lowerCamelCase ) else: return obj class UpperCAmelCase_ ( a): def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = fields(self) # Safety and consistency checks if not len(__a): raise ValueError(f"{self.__class__.__name__} has no fields.") if not all(field.default is None for field in class_fields[1:]): raise ValueError(f"{self.__class__.__name__} should not have more than one required field.") _lowerCAmelCase : Dict = getattr(self, class_fields[0].name) _lowerCAmelCase : str = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(__a): if isinstance(__a, __a): _lowerCAmelCase : Tuple = first_field.items() _lowerCAmelCase : Dict = True else: try: _lowerCAmelCase : Dict = iter(__a) _lowerCAmelCase : Any = True except TypeError: _lowerCAmelCase : Any = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__a): if ( not isinstance(__a, (list, tuple)) or not len(__a) == 2 or not isinstance(element[0], __a) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute _lowerCAmelCase : Any = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"Cannot set key/value for {element}. 
It needs to be a tuple (key, value).") break setattr(self, element[0], element[1]) if element[1] is not None: _lowerCAmelCase : Any = element[1] elif first_field is not None: _lowerCAmelCase : Any = first_field else: for field in class_fields: _lowerCAmelCase : Dict = getattr(self, field.name) if v is not None: _lowerCAmelCase : Union[str, Any] = v def __delitem__( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def snake_case__ ( self, *__a, **__a): '''simple docstring''' raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__( self, __a): '''simple docstring''' if isinstance(__a, __a): _lowerCAmelCase : Optional[int] = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self, __a, __a): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__a, __a) super().__setattr__(__a, __a) def __setitem__( self, __a, __a): '''simple docstring''' super().__setitem__(__a, __a) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__a, __a) def snake_case__ ( self): '''simple docstring''' return tuple(self[k] for k in self.keys()) class UpperCAmelCase_ ( a , a): @classmethod def snake_case__ ( cls, __a): '''simple docstring''' raise ValueError( f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}") class UpperCAmelCase_ ( a): lowerCamelCase__ = 'longest' lowerCamelCase__ = 'max_length' lowerCamelCase__ = 'do_not_pad' class UpperCAmelCase_ ( a): lowerCamelCase__ = 'pt' lowerCamelCase__ = 'tf' lowerCamelCase__ = 'np' lowerCamelCase__ = 'jax' class UpperCAmelCase_ : def __init__( self, __a): '''simple docstring''' _lowerCAmelCase : Tuple = context_managers _lowerCAmelCase : Dict = ExitStack() def __enter__( self): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(__a) def __exit__( self, *__a, **__a): '''simple docstring''' self.stack.__exit__(*__a, **__a) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : str = infer_framework(_lowerCamelCase ) if framework == "tf": _lowerCAmelCase : Tuple = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _lowerCAmelCase : str = inspect.signature(model_class.forward ) # PyTorch models else: _lowerCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : str = model_class.__name__ _lowerCAmelCase : Optional[Any] = infer_framework(_lowerCamelCase ) if framework == "tf": _lowerCAmelCase : Dict = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": _lowerCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models else: _lowerCAmelCase : Dict = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in 
("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def A ( _lowerCamelCase , _lowerCamelCase = "" , _lowerCamelCase = "." ): '''simple docstring''' def _flatten_dict(_lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase="." ): for k, v in d.items(): _lowerCAmelCase : Dict = str(_lowerCamelCase ) + delimiter + str(_lowerCamelCase ) if parent_key else k if v and isinstance(_lowerCamelCase , _lowerCamelCase ): yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase ).items() else: yield key, v return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ) @contextmanager def A ( _lowerCamelCase , _lowerCamelCase = False ): '''simple docstring''' if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def A ( _lowerCamelCase , _lowerCamelCase=None ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.transpose(_lowerCamelCase , axes=_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.T if axes is None else array.permute(*_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase ) else: raise ValueError(F"Type not supported for transpose: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.reshape(_lowerCamelCase , _lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.reshape(*_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.reshape(_lowerCamelCase , _lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.reshape(_lowerCamelCase , _lowerCamelCase ) else: raise ValueError(F"Type not supported for reshape: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase=None ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase ) else: raise ValueError(F"Type not supported for squeeze: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.expand_dims(_lowerCamelCase , _lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.unsqueeze(dim=_lowerCamelCase ) elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase ) else: raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." ) def A ( _lowerCamelCase ): '''simple docstring''' if is_numpy_array(_lowerCamelCase ): return np.size(_lowerCamelCase ) elif is_torch_tensor(_lowerCamelCase ): return array.numel() elif is_tf_tensor(_lowerCamelCase ): import tensorflow as tf return tf.size(_lowerCamelCase ) elif is_jax_tensor(_lowerCamelCase ): return array.size else: raise ValueError(F"Type not supported for expand_dims: {type(_lowerCamelCase )}." 
)


def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix local class references in an auto_map with the repo id."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """Walk the MRO to decide whether a model class is TF, PyTorch or Flax."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
36
0
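The utility module in the row above repeats one dispatch pattern for every tensor operation: inspect the array's type and route to the owning backend. A condensed sketch for `transpose` only, covering numpy and torch (the real helpers also handle TensorFlow and JAX):

import numpy as np

def transpose(array, axes=None):
    # Route to the backend that owns the array type; import torch lazily so
    # the helper still works in a numpy-only environment.
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    try:
        import torch
        if isinstance(array, torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise ValueError(f"Type not supported for transpose: {type(array)}.")

assert transpose(np.ones((2, 3))).shape == (3, 2)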
import argparse import copy def _lowerCAmelCase (_lowerCAmelCase): UpperCamelCase_ = {} with open(_lowerCamelCase) as f: for line in f: if line.split()[0] not in dict_of_neighbours: UpperCamelCase_ = [] _list.append([line.split()[1], line.split()[2]]) UpperCamelCase_ = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]]) if line.split()[1] not in dict_of_neighbours: UpperCamelCase_ = [] _list.append([line.split()[0], line.split()[2]]) UpperCamelCase_ = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]]) return dict_of_neighbours def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase): with open(_lowerCamelCase) as f: UpperCamelCase_ = f.read(1) UpperCamelCase_ = start_node UpperCamelCase_ = [] UpperCamelCase_ = start_node UpperCamelCase_ = 0 while visiting not in first_solution: UpperCamelCase_ = 1_00_00 for k in dict_of_neighbours[visiting]: if int(k[1]) < int(_lowerCamelCase) and k[0] not in first_solution: UpperCamelCase_ = k[1] UpperCamelCase_ = k[0] first_solution.append(_lowerCamelCase) UpperCamelCase_ = distance_of_first_solution + int(_lowerCamelCase) UpperCamelCase_ = best_node first_solution.append(_lowerCamelCase) UpperCamelCase_ = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 UpperCamelCase_ = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 1_00_00 ) return first_solution, distance_of_first_solution def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase): UpperCamelCase_ = [] for n in solution[1:-1]: UpperCamelCase_ = solution.index(_lowerCamelCase) for kn in solution[1:-1]: UpperCamelCase_ = solution.index(_lowerCamelCase) if n == kn: continue UpperCamelCase_ = copy.deepcopy(_lowerCamelCase) UpperCamelCase_ = kn UpperCamelCase_ = n UpperCamelCase_ = 0 for k in _tmp[:-1]: UpperCamelCase_ = _tmp[_tmp.index(_lowerCamelCase) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: UpperCamelCase_ = distance + int(i[1]) _tmp.append(_lowerCamelCase) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp) UpperCamelCase_ = len(neighborhood_of_solution[0]) - 1 neighborhood_of_solution.sort(key=lambda _lowerCAmelCase: x[index_of_last_item_in_the_list]) return neighborhood_of_solution def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase): UpperCamelCase_ = 1 UpperCamelCase_ = first_solution UpperCamelCase_ = [] UpperCamelCase_ = distance_of_first_solution UpperCamelCase_ = solution while count <= iters: UpperCamelCase_ = find_neighborhood(_lowerCamelCase , _lowerCamelCase) UpperCamelCase_ = 0 UpperCamelCase_ = neighborhood[index_of_best_solution] UpperCamelCase_ = len(_lowerCamelCase) - 1 UpperCamelCase_ = False while not found: UpperCamelCase_ = 0 while i < len(_lowerCamelCase): if best_solution[i] != solution[i]: UpperCamelCase_ = best_solution[i] UpperCamelCase_ = solution[i] break UpperCamelCase_ = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node]) UpperCamelCase_ = True UpperCamelCase_ = best_solution[:-1] UpperCamelCase_ = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: UpperCamelCase_ = cost UpperCamelCase_ = solution else: UpperCamelCase_ = index_of_best_solution + 1 UpperCamelCase_ = neighborhood[index_of_best_solution] if len(_lowerCamelCase) >= size: 
tabu_list.pop(0) UpperCamelCase_ = count + 1 return best_solution_ever, best_cost def _lowerCAmelCase (_lowerCAmelCase=None): UpperCamelCase_ = generate_neighbours(args.File) UpperCamelCase_ = generate_first_solution( args.File , _lowerCamelCase) UpperCamelCase_ = tabu_search( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""") if __name__ == "__main__": UpperCAmelCase : List[str] =argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
128
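A simplified sketch of the greedy construction step in the tabu-search script above: starting from a node, repeatedly hop to the cheapest unvisited neighbour, then close the tour. The dict-of-dicts `graph` is our stand-in for the edge-list file the script parses:

def greedy_tour(graph, start):
    tour, total, current = [start], 0, start
    while len(tour) < len(graph):
        # cheapest edge leading to a node we have not visited yet
        nxt, cost = min(
            ((n, d) for n, d in graph[current].items() if n not in tour),
            key=lambda nd: nd[1],
        )
        tour.append(nxt)
        total += cost
        current = nxt
    total += graph[current][start]  # edge back to the start closes the tour
    return tour + [start], total

graph = {"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}
print(greedy_tour(graph, "a"))  # (['a', 'b', 'c', 'a'], 7)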
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
36
0
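The parametrized cases above pin down the contract of `_distribute_shards`: contiguous ranges, sizes differing by at most one, and never more jobs than shards. A small re-implementation sketch of that contract for illustration (not the `datasets` internals):

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    if num_shards == 0:
        return []
    num_jobs = min(num_shards, max_num_jobs)
    base, extra = divmod(num_shards, num_jobs)
    out, start = [], 0
    for job in range(num_jobs):
        size = base + (1 if job < extra else 0)  # earlier jobs absorb the remainder
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]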
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt: return True if pattern occurs as a substring of text."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix of pattern, the length of its longest proper border."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
20
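As a usage note for the KMP snippet above: the same failure array also yields every match position instead of a single boolean, by resetting `j` after each hit. A sketch reusing `get_failure_array` from the snippet:

def kmp_all_matches(pattern: str, text: str) -> list[int]:
    failure = get_failure_array(pattern)
    hits, j = [], 0
    for i, ch in enumerate(text):
        while j > 0 and pattern[j] != ch:
            j = failure[j - 1]  # fall back to the longest viable border
        if pattern[j] == ch:
            j += 1
        if j == len(pattern):
            hits.append(i - j + 1)
            j = failure[j - 1]  # keep scanning so overlapping matches are found
    return hits

assert kmp_all_matches("aba", "ababa") == [0, 2]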
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class UpperCAmelCase_ : def __init__( self, __a = "cpu", __a = "openai/clip-vit-large-patch14"): '''simple docstring''' _lowerCAmelCase : Optional[int] = device _lowerCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(__a) _lowerCAmelCase : Any = [0.48_145_466, 0.4_578_275, 0.40_821_073] _lowerCAmelCase : Union[str, Any] = [0.26_862_954, 0.26_130_258, 0.27_577_711] _lowerCAmelCase : Tuple = torchvision.transforms.Normalize(self.image_mean, self.image_std) _lowerCAmelCase : Optional[int] = torchvision.transforms.Resize(224) _lowerCAmelCase : Dict = torchvision.transforms.CenterCrop(224) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.resize(__a) _lowerCAmelCase : List[str] = self.center_crop(__a) _lowerCAmelCase : Optional[Any] = self.normalize(__a) return images def __call__( self, __a=None, __a=None, **__a): '''simple docstring''' _lowerCAmelCase : str = self.tokenizer(text=__a, **__a) _lowerCAmelCase : List[str] = self.preprocess_img(__a) _lowerCAmelCase : Tuple = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class UpperCAmelCase_ ( nn.Module): def __init__( self, __a=10, __a=0.01, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=False, __a=True, __a="image", __a=True, __a=False, __a=False, __a=False, ): '''simple docstring''' super().__init__() _lowerCAmelCase : List[str] = None _lowerCAmelCase : List[str] = device if device else get_device() if vqgan: _lowerCAmelCase : Union[str, Any] = vqgan else: _lowerCAmelCase : Optional[Any] = load_vqgan(self.device, conf_path=__a, ckpt_path=__a) self.vqgan.eval() if clip: _lowerCAmelCase : str = clip else: _lowerCAmelCase : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") self.clip.to(self.device) _lowerCAmelCase : Optional[int] = ProcessorGradientFlow(device=self.device) _lowerCAmelCase : Any = iterations _lowerCAmelCase : List[Any] = lr _lowerCAmelCase : Tuple = log _lowerCAmelCase : List[str] = make_grid _lowerCAmelCase : int = return_val _lowerCAmelCase : Dict = quantize _lowerCAmelCase : Any = self.vqgan.decoder.z_shape def snake_case__ ( self, __a=None, __a=None, __a=5, __a=True): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [] if output_path is None: _lowerCAmelCase : List[Any] = "./animation.gif" if input_path is None: _lowerCAmelCase : str = self.save_path _lowerCAmelCase : str = sorted(glob(input_path + "/*")) if not len(__a): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)") if len(__a) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)") _lowerCAmelCase : Optional[int] = total_duration / len(__a) _lowerCAmelCase : Union[str, Any] = [frame_duration] * len(__a) if extend_frames: _lowerCAmelCase : Any = 1.5 _lowerCAmelCase : List[str] = 3 for file_name in paths: if file_name.endswith(".png"): images.append(imageio.imread(__a)) imageio.mimsave(__a, __a, duration=__a) print(f"gif saved to {output_path}") def snake_case__ ( self, __a=None, __a=None): '''simple docstring''' if not (path or img): raise 
ValueError("Input either path or tensor") if img is not None: raise NotImplementedError _lowerCAmelCase : Dict = preprocess(Image.open(__a), target_image_size=256).to(self.device) _lowerCAmelCase : Dict = preprocess_vqgan(__a) _lowerCAmelCase , *_lowerCAmelCase : str = self.vqgan.encode(__a) return z def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.latent.detach().requires_grad_() _lowerCAmelCase : Dict = base_latent + transform_vector if self.quantize: _lowerCAmelCase , *_lowerCAmelCase : List[Any] = self.vqgan.quantize(__a) else: _lowerCAmelCase : Any = trans_latent return self.vqgan.decode(__a) def snake_case__ ( self, __a, __a, __a=None): '''simple docstring''' _lowerCAmelCase : int = self.clip_preprocessor(text=__a, images=__a, return_tensors="pt", padding=__a) _lowerCAmelCase : Optional[int] = self.clip(**__a) _lowerCAmelCase : Any = clip_outputs.logits_per_image if weights is not None: _lowerCAmelCase : Tuple = similarity_logits * weights return similarity_logits.sum() def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : List[Any] = self._get_clip_similarity(pos_prompts["prompts"], __a, weights=(1 / pos_prompts["weights"])) if neg_prompts: _lowerCAmelCase : List[Any] = self._get_clip_similarity(neg_prompts["prompts"], __a, weights=neg_prompts["weights"]) else: _lowerCAmelCase : Union[str, Any] = torch.tensor([1], device=self.device) _lowerCAmelCase : List[str] = -torch.log(__a) + torch.log(__a) return loss def snake_case__ ( self, __a, __a, __a): '''simple docstring''' _lowerCAmelCase : Optional[Any] = torch.randn_like(self.latent, requires_grad=__a, device=self.device) _lowerCAmelCase : Optional[int] = torch.optim.Adam([vector], lr=self.lr) for i in range(self.iterations): optim.zero_grad() _lowerCAmelCase : Any = self._add_vector(__a) _lowerCAmelCase : Optional[Any] = loop_post_process(__a) _lowerCAmelCase : Optional[Any] = self._get_CLIP_loss(__a, __a, __a) print("CLIP loss", __a) if self.log: wandb.log({"CLIP Loss": clip_loss}) clip_loss.backward(retain_graph=__a) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def snake_case__ ( self, __a, __a, __a): '''simple docstring''' wandb.init(reinit=__a, project="face-editor") wandb.config.update({"Positive Prompts": positive_prompts}) wandb.config.update({"Negative Prompts": negative_prompts}) wandb.config.update({"lr": self.lr, "iterations": self.iterations}) if image_path: _lowerCAmelCase : str = Image.open(__a) _lowerCAmelCase : int = image.resize((256, 256)) wandb.log("Original Image", wandb.Image(__a)) def snake_case__ ( self, __a): '''simple docstring''' if not prompts: return [] _lowerCAmelCase : int = [] _lowerCAmelCase : List[str] = [] if isinstance(__a, __a): _lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split("|")] for prompt in prompts: if isinstance(__a, (tuple, list)): _lowerCAmelCase : Optional[Any] = prompt[0] _lowerCAmelCase : Union[str, Any] = float(prompt[1]) elif ":" in prompt: _lowerCAmelCase , _lowerCAmelCase : int = prompt.split(":") _lowerCAmelCase : Optional[Any] = float(__a) else: _lowerCAmelCase : Optional[int] = prompt _lowerCAmelCase : List[Any] = 1.0 processed_prompts.append(__a) weights.append(__a) return { "prompts": processed_prompts, "weights": torch.tensor(__a, device=self.device), } def snake_case__ ( self, __a, __a=None, __a=None, __a=True, __a=False, __a=True, __a=True, __a=None, ): '''simple docstring''' if image_path: _lowerCAmelCase : 
List[Any] = self._get_latent(__a) else: _lowerCAmelCase : Any = torch.randn(self.latent_dim, device=self.device) if self.log: self._init_logging(__a, __a, __a) assert pos_prompts, "You must provide at least one positive prompt." _lowerCAmelCase : int = self.process_prompts(__a) _lowerCAmelCase : List[str] = self.process_prompts(__a) if save_final and save_path is None: _lowerCAmelCase : int = os.path.join("./outputs/", "_".join(pos_prompts["prompts"])) if not os.path.exists(__a): os.makedirs(__a) else: _lowerCAmelCase : Tuple = save_path + "_" + get_timestamp() os.makedirs(__a) _lowerCAmelCase : Tuple = save_path _lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent)[0] if show_intermediate: print("Original Image") show_pil(custom_to_pil(__a)) _lowerCAmelCase : int = loop_post_process(__a) for iter, transformed_img in enumerate(self._optimize_CLIP(__a, __a, __a)): if show_intermediate: show_pil(__a) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png")) if self.log: wandb.log({"Image": wandb.Image(__a)}) if show_final: show_pil(__a) if save_final: transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
36
0
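A hedged sketch of the prompt parsing the CLIP-guided editor above performs: prompts arrive as a '|'-separated string, as (text, weight) pairs, or as 'text:weight' strings, and each is normalized to a pair. This is a standalone re-implementation minus the tensor packing:

def parse_prompts(prompts):
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    parsed = []
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:
            text, weight = prompt.split(":")
            weight = float(weight)
        else:
            text, weight = prompt, 1.0
        parsed.append((text, weight))
    return parsed

assert parse_prompts("smiling:2|old photo") == [("smiling", 2.0), ("old photo", 1.0)]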
"""simple docstring""" import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a__ : Union[str, Any] = 1_6 a__ : Any = 3_2 def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' return int(x / 2**20 ) class UpperCamelCase_ : """simple docstring""" def __enter__( self : Optional[Any] ) -> List[Any]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero __SCREAMING_SNAKE_CASE = torch.cuda.memory_allocated() return self def __exit__( self : int , *UpperCAmelCase__ : int ) -> List[Any]: gc.collect() torch.cuda.empty_cache() __SCREAMING_SNAKE_CASE = torch.cuda.memory_allocated() __SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated() __SCREAMING_SNAKE_CASE = bamb(self.end - self.begin ) __SCREAMING_SNAKE_CASE = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = 16 , lowerCAmelCase_ = "bert-base-cased" , lowerCAmelCase_ = 320 , lowerCAmelCase_ = 160 , ): '''simple docstring''' __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = load_dataset( "glue" , "mrpc" , split={"train": f"""train[:{n_train}]""", "validation": f"""validation[:{n_val}]"""} ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) __SCREAMING_SNAKE_CASE = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __SCREAMING_SNAKE_CASE = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=_lowerCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(_lowerCamelCase , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
__SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase ) return train_dataloader, eval_dataloader def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __SCREAMING_SNAKE_CASE = config["lr"] __SCREAMING_SNAKE_CASE = int(config["num_epochs"] ) __SCREAMING_SNAKE_CASE = int(config["seed"] ) __SCREAMING_SNAKE_CASE = int(config["batch_size"] ) __SCREAMING_SNAKE_CASE = args.model_name_or_path set_seed(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = get_dataloaders(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained(_lowerCamelCase , return_dict=_lowerCamelCase ) # Instantiate optimizer __SCREAMING_SNAKE_CASE = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __SCREAMING_SNAKE_CASE = optimizer_cls(params=model.parameters() , lr=_lowerCamelCase ) if accelerator.state.deepspeed_plugin is not None: __SCREAMING_SNAKE_CASE = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = (len(_lowerCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup( optimizer=_lowerCamelCase , num_warmup_steps=0 , num_training_steps=_lowerCamelCase , ) else: __SCREAMING_SNAKE_CASE = DummyScheduler(_lowerCamelCase , total_num_steps=_lowerCamelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__SCREAMING_SNAKE_CASE = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # We need to keep track of how many total steps we have iterated over __SCREAMING_SNAKE_CASE = 0 # We also need to keep track of the stating epoch so files are named properly __SCREAMING_SNAKE_CASE = 0 # Now we train the model __SCREAMING_SNAKE_CASE = {} for epoch in range(_lowerCamelCase , _lowerCamelCase ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_lowerCamelCase ): __SCREAMING_SNAKE_CASE = model(**_lowerCamelCase ) __SCREAMING_SNAKE_CASE = outputs.loss __SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps accelerator.backward(_lowerCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) ) accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) ) accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) ) accelerator.print( "Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) __SCREAMING_SNAKE_CASE = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=_lowerCamelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCamelCase , ) parser.add_argument( "--output_dir" , type=_lowerCamelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--peak_memory_upper_bound" , type=_lowerCamelCase , default=_lowerCamelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , ) parser.add_argument( "--n_train" , type=_lowerCamelCase , default=320 , help="Number of training examples to use." , ) parser.add_argument( "--n_val" , type=_lowerCamelCase , default=160 , help="Number of validation examples to use." , ) parser.add_argument( "--num_epochs" , type=_lowerCamelCase , default=1 , help="Number of train epochs." , ) __SCREAMING_SNAKE_CASE = parser.parse_args() __SCREAMING_SNAKE_CASE = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": main()
54
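The `TorchTracemalloc` context manager in the script above is a reusable pattern on its own. A minimal sketch (CUDA required; reports MiB deltas relative to entry; the usage lines are hypothetical):

import gc
import torch

def to_mib(num_bytes):
    return int(num_bytes / 2**20)

class PeakMemory:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # zero the peak gauge
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.used = to_mib(torch.cuda.memory_allocated() - self.begin)
        self.peaked = to_mib(torch.cuda.max_memory_allocated() - self.begin)

# with PeakMemory() as m:
#     train_one_epoch(...)  # hypothetical training step
# print(m.used, m.peaked)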
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 _snake_case = get_tests_dir("fixtures") class UpperCAmelCase_ ( unittest.TestCase): def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = mock.Mock() _lowerCAmelCase : int = 500 _lowerCAmelCase : Tuple = {} _lowerCAmelCase : str = HTTPError _lowerCAmelCase : Union[str, Any] = {} # Download this model to make sure it's in the cache. _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=__a) as mock_head: _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit") # This check we did call the fake head request mock_head.assert_called() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Tuple = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json") def snake_case__ ( self): '''simple docstring''' with self.assertRaises(__a): # config is in subfolder, the following should not work without specifying the subfolder _lowerCAmelCase : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants") _lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor") self.assertIsNotNone(__a) @is_staging_test class UpperCAmelCase_ ( unittest.TestCase): @classmethod def snake_case__ ( cls): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = TOKEN HfFolder.save_token(__a) @classmethod def snake_case__ ( cls): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="test-image-processor") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-image-processor") except HTTPError: pass def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(__a) image_processor.push_to_hub("test-image-processor", use_auth_token=self._token) _lowerCAmelCase : str = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) # Reset repo delete_repo(token=self._token, repo_id="test-image-processor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a, repo_id="test-image-processor", push_to_hub=__a, use_auth_token=self._token) _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Any = ViTImageProcessor.from_pretrained(__a) image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token) _lowerCAmelCase : Tuple = 
ViTImageProcessor.from_pretrained("valid_org/test-image-processor") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-image-processor") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __a, repo_id="valid_org/test-image-processor-org", push_to_hub=__a, use_auth_token=self._token) _lowerCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org") for k, v in image_processor.__dict__.items(): self.assertEqual(__a, getattr(__a, __a)) def snake_case__ ( self): '''simple docstring''' CustomImageProcessor.register_for_auto_class() _lowerCAmelCase : List[str] = CustomImageProcessor.from_pretrained(__a) image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, ) _lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor", trust_remote_code=__a) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
36
0
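A condensed sketch of the offline-fallback check in the tests above: warm the cache with one real download, then force every HTTP request to return a 500 and confirm that loading still succeeds from cache. The mock fields mirror the test's; the first call needs network access once:

import unittest.mock as mock
from requests.exceptions import HTTPError
from transformers import ViTImageProcessor

ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")  # warm the cache

response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

with mock.patch("requests.Session.request", return_value=response_mock):
    processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")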
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase = 1000 ) -> List[Any]: """simple docstring""" lowerCAmelCase_ : Dict = -1 lowerCAmelCase_ : Dict = 0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c lowerCAmelCase_ : Union[str, Any] = (n * n - 2 * a * n) // (2 * n - 2 * a) lowerCAmelCase_ : Any = n - a - b if c * c == (a * a + b * b): lowerCAmelCase_ : str = a * b * c if candidate >= product: lowerCAmelCase_ : Optional[Any] = candidate return product if __name__ == "__main__": print(F"""{solution() = }""")
241
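For the record, the closed form used above falls out of eliminating c: substituting c = n - a - b into a^2 + b^2 = c^2 gives 0 = n^2 + 2ab - 2n(a + b), hence b * (2n - 2a) = n^2 - 2an and b = (n^2 - 2an) / (2n - 2a). Probing with integer division is safe because every candidate is re-verified through c * c == a * a + b * b. A quick check on the classic 3-4-5 triangle scaled to perimeter 12:

assert solution(12) == 60  # 3 * 4 * 5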
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : def __init__( self, __a, __a=13, __a=7, __a=True, __a=True, __a=True, __a=True, __a=99, __a=24, __a=2, __a=6, __a=37, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=16, __a=2, __a=0.02, __a=3, __a=None, __a=1000, ): '''simple docstring''' _lowerCAmelCase : Tuple = parent _lowerCAmelCase : List[str] = batch_size _lowerCAmelCase : int = seq_length _lowerCAmelCase : Optional[int] = is_training _lowerCAmelCase : Dict = use_input_mask _lowerCAmelCase : List[str] = use_token_type_ids _lowerCAmelCase : str = use_labels _lowerCAmelCase : Optional[Any] = vocab_size _lowerCAmelCase : Tuple = hidden_size _lowerCAmelCase : List[Any] = num_hidden_layers _lowerCAmelCase : Optional[Any] = num_attention_heads _lowerCAmelCase : Any = intermediate_size _lowerCAmelCase : List[str] = hidden_act _lowerCAmelCase : Union[str, Any] = hidden_dropout_prob _lowerCAmelCase : Any = attention_probs_dropout_prob _lowerCAmelCase : int = max_position_embeddings _lowerCAmelCase : Optional[int] = type_vocab_size _lowerCAmelCase : Optional[Any] = type_sequence_label_size _lowerCAmelCase : List[str] = initializer_range _lowerCAmelCase : List[Any] = num_labels _lowerCAmelCase : Tuple = scope _lowerCAmelCase : str = range_bbox def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: _lowerCAmelCase : Dict = bbox[i, j, 3] _lowerCAmelCase : int = bbox[i, j, 1] _lowerCAmelCase : Tuple = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCAmelCase : str = bbox[i, j, 2] _lowerCAmelCase : List[Any] = bbox[i, j, 0] _lowerCAmelCase : str = t _lowerCAmelCase : Optional[Any] = None if self.use_input_mask: _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) _lowerCAmelCase : Dict = None if self.use_token_type_ids: _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : Optional[Any] = None if self.use_labels: _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size) _lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length], self.num_labels) _lowerCAmelCase : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def snake_case__ ( self): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = LiltModel(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Dict = model(__a, bbox=__a, attention_mask=__a, token_type_ids=__a) _lowerCAmelCase : str = model(__a, bbox=__a, token_type_ids=__a) _lowerCAmelCase : List[Any] = model(__a, bbox=__a) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.num_labels _lowerCAmelCase : Optional[Any] = LiltForTokenClassification(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Dict = model( __a, bbox=__a, attention_mask=__a, token_type_ids=__a, labels=__a) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ): '''simple docstring''' _lowerCAmelCase : Optional[int] = LiltForQuestionAnswering(config=__a) model.to(__a) model.eval() _lowerCAmelCase : Tuple = model( __a, bbox=__a, attention_mask=__a, token_type_ids=__a, start_positions=__a, end_positions=__a, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Dict = config_and_inputs _lowerCAmelCase : List[Any] = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class UpperCAmelCase_ ( a , a , a , unittest.TestCase): lowerCamelCase__ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def snake_case__ ( self, __a, __a, __a, __a, __a): '''simple docstring''' return True def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[Any] = LiltModelTester(self) _lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, hidden_size=37) def snake_case__ ( self): '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCAmelCase : Any = type self.model_tester.create_and_check_model(*__a) def snake_case__ ( self): '''simple 
docstring''' _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a) @slow def snake_case__ ( self): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : str = LiltModel.from_pretrained(__a) self.assertIsNotNone(__a) @require_torch @slow class UpperCAmelCase_ ( unittest.TestCase): def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Dict = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__a) _lowerCAmelCase : Any = torch.tensor([[1, 2]], device=__a) _lowerCAmelCase : str = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=__a) # forward pass with torch.no_grad(): _lowerCAmelCase : Optional[Any] = model(input_ids=__a, bbox=__a) _lowerCAmelCase : Optional[int] = torch.Size([1, 2, 768]) _lowerCAmelCase : List[str] = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]], device=__a, ) self.assertTrue(outputs.last_hidden_state.shape, __a) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], __a, atol=1E-3))
36
0
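A short vectorized sketch of the box-legality fix-up the model tester above performs element by element: for every (x0, y0, x1, y1) box, ensure x0 <= x1 and y0 <= y1. This torch version is our equivalent of the nested loops, for illustration:

import torch

def make_boxes_legal(bbox: torch.Tensor) -> torch.Tensor:
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)

print(make_boxes_legal(torch.tensor([[5, 7, 2, 3]])))  # tensor([[2, 3, 5, 7]])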
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) def _A ( snake_case ) -> List[Any]: _lowercase : List[Any] = "huggingface/label-files" _lowercase : int = "imagenet-1k-id2label.json" _lowercase : Tuple = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _lowercase : Tuple = {int(_lowerCamelCase ): v for k, v in idalabel.items()} _lowercase : Union[str, Any] = {v: k for k, v in idalabel.items()} _lowercase : Tuple = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" _lowercase : Optional[int] = BitConfig( conv_layer=_lowerCamelCase , num_labels=10_00 , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase , ) return config def _A ( snake_case ) -> int: if "stem.conv" in name: _lowercase : List[str] = name.replace("stem.conv" , "bit.embedder.convolution" ) if "blocks" in name: _lowercase : Any = name.replace("blocks" , "layers" ) if "head.fc" in name: _lowercase : Optional[Any] = name.replace("head.fc" , "classifier.1" ) if name.startswith("norm" ): _lowercase : Any = "bit." + name if "bit" not in name and "classifier" not in name: _lowercase : Dict = "bit.encoder." 
+ name return name def _A ( ) -> Dict: _lowercase : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowercase : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def _A ( snake_case , snake_case , snake_case=False ) -> List[Any]: _lowercase : Dict = get_config(_lowerCamelCase ) # load original model from timm _lowercase : int = create_model(_lowerCamelCase , pretrained=_lowerCamelCase ) timm_model.eval() # load state_dict of original model _lowercase : Any = timm_model.state_dict() for key in state_dict.copy().keys(): _lowercase : Dict = state_dict.pop(_lowerCamelCase ) _lowercase : Tuple = val.squeeze() if "head" in key else val # load HuggingFace model _lowercase : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.eval() model.load_state_dict(_lowerCamelCase ) # create image processor _lowercase : Dict = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) ) _lowercase : Optional[int] = transform.transforms _lowercase : Tuple = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _lowercase : Tuple = BitImageProcessor( do_resize=_lowerCamelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _lowercase : Optional[int] = prepare_img() _lowercase : Any = transform(_lowerCamelCase ).unsqueeze(0 ) _lowercase : Optional[int] = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) # verify logits with torch.no_grad(): _lowercase : Tuple = model(_lowerCamelCase ) _lowercase : str = outputs.logits print("Logits:" , logits[0, :3] ) print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] ) _lowercase : Union[str, Any] = timm_model(_lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print(F'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(F'''ybelkada/{model_name}''' ) processor.push_to_hub(F'''ybelkada/{model_name}''' ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='resnetv2_50x1_bitm', type=str, help='Name of the BiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model to the hub.', ) _snake_case = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse import copy def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : int = {} with open(_lowerCamelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _lowerCAmelCase : Tuple = [] _list.append([line.split()[1], line.split()[2]] ) _lowerCAmelCase : Any = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _lowerCAmelCase : str = [] _list.append([line.split()[0], line.split()[2]] ) _lowerCAmelCase : Any = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' with open(_lowerCamelCase ) as f: _lowerCAmelCase : str = f.read(1 ) _lowerCAmelCase : str = start_node _lowerCAmelCase : List[str] = [] _lowerCAmelCase : Any = start_node _lowerCAmelCase : str = 0 while visiting not in first_solution: _lowerCAmelCase : Dict = 10_000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_lowerCamelCase ) and k[0] not in first_solution: _lowerCAmelCase : List[str] = k[1] _lowerCAmelCase : List[Any] = k[0] first_solution.append(_lowerCamelCase ) _lowerCAmelCase : Optional[int] = distance_of_first_solution + int(_lowerCamelCase ) _lowerCAmelCase : str = best_node first_solution.append(_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 _lowerCAmelCase : Tuple = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10_000 ) return first_solution, distance_of_first_solution def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Tuple = [] for n in solution[1:-1]: _lowerCAmelCase : Dict = solution.index(_lowerCamelCase ) for kn in solution[1:-1]: _lowerCAmelCase : Dict = solution.index(_lowerCamelCase ) if n == kn: continue _lowerCAmelCase : Optional[int] = copy.deepcopy(_lowerCamelCase ) _lowerCAmelCase : int = kn _lowerCAmelCase : Dict = n _lowerCAmelCase : Optional[int] = 0 for k in _tmp[:-1]: _lowerCAmelCase : str = _tmp[_tmp.index(_lowerCamelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: _lowerCAmelCase : Optional[Any] = distance + int(i[1] ) _tmp.append(_lowerCamelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) _lowerCAmelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _lowerCamelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[str] = 1 _lowerCAmelCase : int = first_solution _lowerCAmelCase : Tuple = [] _lowerCAmelCase : Tuple = distance_of_first_solution _lowerCAmelCase : Optional[int] = solution while count <= iters: _lowerCAmelCase : int = find_neighborhood(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Dict = neighborhood[index_of_best_solution] _lowerCAmelCase : int = len(_lowerCamelCase ) - 1 _lowerCAmelCase : Union[str, Any] = False while not found: _lowerCAmelCase : Tuple = 0 while i < len(_lowerCamelCase ): if best_solution[i] != solution[i]: _lowerCAmelCase : str = best_solution[i] _lowerCAmelCase : Tuple = solution[i] break _lowerCAmelCase : int = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, 
first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) _lowerCAmelCase : Optional[int] = True _lowerCAmelCase : Optional[Any] = best_solution[:-1] _lowerCAmelCase : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: _lowerCAmelCase : Union[str, Any] = cost _lowerCAmelCase : List[Any] = solution else: _lowerCAmelCase : Optional[Any] = index_of_best_solution + 1 _lowerCAmelCase : Optional[Any] = neighborhood[index_of_best_solution] if len(_lowerCamelCase ) >= size: tabu_list.pop(0 ) _lowerCAmelCase : int = count + 1 return best_solution_ever, best_cost def A ( _lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase : int = generate_neighbours(args.File ) _lowerCAmelCase , _lowerCAmelCase : List[str] = generate_first_solution( args.File , _lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Any = tabu_search( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , args.Iterations , args.Size , ) print(F"Best solution: {best_sol}, with total distance: {best_cost}." ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
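# The parser above expects an edge-list file with one "<node_a> <node_b> <distance>"
# triple per line; the start node is read from the file's first character.
# A hypothetical instance and invocation:
#
#   $ cat tabu.txt
#   a b 20
#   a c 18
#   b c 10
#
#   $ python tabu_search.py -f tabu.txt -i 100 -s 5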
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" A__ = FileLock(str(tmpdir / 'foo.lock' ) ) A__ = FileLock(str(tmpdir / 'foo.lock' ) ) A__ = 0.0_1 with locka.acquire(): with pytest.raises(_lowerCamelCase ): A__ = time.time() locka.acquire(_lowerCamelCase ) assert time.time() - _start > timeout def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" A__ = "a" * 1_000 + ".lock" A__ = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('.lock' ) assert not locka._lock_file.endswith(_lowerCamelCase ) assert len(os.path.basename(locka._lock_file ) ) <= 255 A__ = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(_lowerCamelCase ): locka.acquire(0 )
import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin _snake_case = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class UpperCAmelCase_ ( a , unittest.TestCase): lowerCamelCase__ = BartphoTokenizer lowerCamelCase__ = False lowerCamelCase__ = True def snake_case__ ( self): '''simple docstring''' super().setUp() _lowerCAmelCase : str = ["▁This", "▁is", "▁a", "▁t", "est"] _lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a)))) _lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"} _lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"]) with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp: for token in vocab_tokens: fp.write(f"{token} {vocab_tokens[token]}\n") _lowerCAmelCase : Optional[Any] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def snake_case__ ( self, **__a): '''simple docstring''' kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname, **__a) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = "This is a là test" _lowerCAmelCase : Optional[int] = "This is a<unk><unk> test" return input_text, output_text def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : Optional[int] = BartphoTokenizer(__a, self.monolingual_vocab_file, **self.special_tokens_map) _lowerCAmelCase : List[Any] = "This is a là test" _lowerCAmelCase : str = "▁This ▁is ▁a ▁l à ▁t est".split() _lowerCAmelCase : str = tokenizer.tokenize(__a) self.assertListEqual(__a, __a) _lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token] _lowerCAmelCase : List[str] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), __a)
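# A hedged usage sketch with the published BARTpho checkpoint (needs sentencepiece):
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
print(tokenizer.tokenize("This is a là test"))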
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Any = MgpstrTokenizer UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : str = False def snake_case_ ( self ) -> Any: super().setUp() # fmt: off UpperCamelCase : Union[str, Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on UpperCamelCase : List[str] = dict(zip(__a, range(len(__a ) ) ) ) UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file, 'w', encoding='utf-8' ) as fp: fp.write(json.dumps(__a ) + '\n' ) def snake_case_ ( self, **SCREAMING_SNAKE_CASE_ ) -> List[Any]: return MgpstrTokenizer.from_pretrained(self.tmpdirname, **__a ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: UpperCamelCase : str = "tester" UpperCamelCase : List[str] = "tester" return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.' ) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> str: UpperCamelCase : List[str] = self.get_tokenizers(do_lower_case=__a ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCamelCase : str = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({'cls_token': special_token} ) UpperCamelCase : List[str] = tokenizer.encode([special_token], add_special_tokens=__a ) self.assertEqual(len(__a ), 1 ) UpperCamelCase : Optional[Any] = tokenizer.decode(__a, skip_special_tokens=__a ) self.assertTrue(special_token not in decoded ) def snake_case_ ( self ) -> int: UpperCamelCase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCamelCase : Union[str, Any] = self.get_input_output_texts(__a ) UpperCamelCase : int = tokenizer.tokenize(__a ) UpperCamelCase : Dict = tokenizer.convert_tokens_to_ids(__a ) UpperCamelCase : Dict = tokenizer.encode(__a, add_special_tokens=__a ) self.assertListEqual(__a, __a ) UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(__a ) self.assertNotEqual(len(__a ), 0 ) UpperCamelCase : List[str] = tokenizer.decode(__a ) self.assertIsInstance(__a, __a ) self.assertEqual(text_a.replace(' ', '' ), __a ) @unittest.skip('MGP-STR tokenizer only handles one sequence.' ) def snake_case_ ( self ) -> Optional[int]: pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' ) def snake_case_ ( self ) -> Union[str, Any]: pass
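# A hedged usage sketch; "alibaba-damo/mgp-str-base" is the published MGP-STR checkpoint.
from transformers import MgpstrTokenizer

tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
print(tokenizer("tester").input_ids)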
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL _snake_case = logging.get_logger(__name__) def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' def constraint_to_multiple_of(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 , _lowerCamelCase=None ): _lowerCAmelCase : Tuple = round(val / multiple ) * multiple if max_val is not None and x > max_val: _lowerCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple if x < min_val: _lowerCAmelCase : List[str] = math.ceil(val / multiple ) * multiple return x _lowerCAmelCase : Union[str, Any] = (output_size, output_size) if isinstance(_lowerCamelCase , _lowerCamelCase ) else output_size _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = get_image_size(_lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Any = output_size # determine new height and width _lowerCAmelCase : List[Any] = output_height / input_height _lowerCAmelCase : Any = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _lowerCAmelCase : Union[str, Any] = scale_width else: # fit height _lowerCAmelCase : Union[str, Any] = scale_height _lowerCAmelCase : List[str] = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCamelCase ) _lowerCAmelCase : Dict = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCamelCase ) return (new_height, new_width) class UpperCAmelCase_ ( a): lowerCamelCase__ = ['pixel_values'] def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = False, __a = 1, __a = True, __a = 1 / 255, __a = True, __a = None, __a = None, **__a, ): '''simple docstring''' super().__init__(**__a) _lowerCAmelCase : Any = size if size is not None else {"height": 384, "width": 384} _lowerCAmelCase : Optional[int] = get_size_dict(__a) _lowerCAmelCase : Optional[Any] = do_resize _lowerCAmelCase : Dict = size _lowerCAmelCase : Any = keep_aspect_ratio _lowerCAmelCase : str = ensure_multiple_of _lowerCAmelCase : str = resample _lowerCAmelCase : Dict = do_rescale _lowerCAmelCase : Optional[int] = rescale_factor _lowerCAmelCase : Dict = do_normalize _lowerCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self, __a, __a, __a = False, __a = 1, __a = PILImageResampling.BICUBIC, __a = None, **__a, ): '''simple docstring''' _lowerCAmelCase : List[Any] = get_size_dict(__a) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}") _lowerCAmelCase : List[Any] = get_resize_output_image_size( __a, output_size=(size["height"], size["width"]), keep_aspect_ratio=__a, multiple=__a, ) return resize(__a, size=__a, resample=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a = None, **__a, ): '''simple docstring''' return rescale(__a, scale=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ): '''simple docstring''' return normalize(__a, mean=__a, std=__a, data_format=__a, **__a) def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ): '''simple docstring''' _lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase : List[Any] = size if size is not None else self.size _lowerCAmelCase : str = get_size_dict(__a) _lowerCAmelCase : Dict = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _lowerCAmelCase : Any = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _lowerCAmelCase : int = resample if resample is not None else self.resample _lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std _lowerCAmelCase : Optional[Any] = make_list_of_images(__a) if not valid_images(__a): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. 
_lowerCAmelCase : List[Any] = [to_numpy_array(__a) for image in images] if do_resize: _lowerCAmelCase : Any = [self.resize(image=__a, size=__a, resample=__a) for image in images] if do_rescale: _lowerCAmelCase : List[str] = [self.rescale(image=__a, scale=__a) for image in images] if do_normalize: _lowerCAmelCase : Dict = [self.normalize(image=__a, mean=__a, std=__a) for image in images] _lowerCAmelCase : List[str] = [to_channel_dimension_format(__a, __a) for image in images] _lowerCAmelCase : Optional[Any] = {"pixel_values": images} return BatchFeature(data=__a, tensor_type=__a) def snake_case__ ( self, __a, __a = None): '''simple docstring''' _lowerCAmelCase : Optional[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__a) != len(__a): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits") if is_torch_tensor(__a): _lowerCAmelCase : List[Any] = target_sizes.numpy() _lowerCAmelCase : Dict = [] for idx in range(len(__a)): _lowerCAmelCase : int = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a) _lowerCAmelCase : int = resized_logits[0].argmax(dim=0) semantic_segmentation.append(__a) else: _lowerCAmelCase : Dict = logits.argmax(dim=1) _lowerCAmelCase : str = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
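# The class name above is obscured, but the configuration (384x384 default size,
# keep_aspect_ratio, ensure_multiple_of, semantic-segmentation post-processing)
# matches transformers' DPT image processor, so a sketch under that assumption:
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32)
pixel_values = processor(images=Image.open("room.png"), return_tensors="pt").pixel_values
print(pixel_values.shape)  # height/width come out as multiples of 32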
"""simple docstring""" from __future__ import annotations def snake_case ( A__ ,A__ = None ,A__ = None ): if start is None: UpperCAmelCase_ : Union[str, Any] = 0 if end is None: UpperCAmelCase_ : int = len(_lowerCamelCase ) - 1 if start >= end: return UpperCAmelCase_ : Union[str, Any] = (start + end) // 2 slowsort(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) slowsort(_lowerCamelCase ,mid + 1 ,_lowerCamelCase ) if sequence[end] < sequence[mid]: UpperCAmelCase_ : Union[str, Any] = sequence[mid], sequence[end] slowsort(_lowerCamelCase ,_lowerCamelCase ,end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json", } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''efficientnet''' def __init__( self , lowerCamelCase__ = 3 , lowerCamelCase__ = 600 , lowerCamelCase__ = 2.0 , lowerCamelCase__ = 3.1 , lowerCamelCase__ = 8 , lowerCamelCase__ = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase__ = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase__ = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase__ = [] , lowerCamelCase__ = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase__ = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase__ = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase__ = 0.25 , lowerCamelCase__ = "swish" , lowerCamelCase__ = 2_560 , lowerCamelCase__ = "mean" , lowerCamelCase__ = 0.02 , lowerCamelCase__ = 0.0_01 , lowerCamelCase__ = 0.99 , lowerCamelCase__ = 0.5 , lowerCamelCase__ = 0.2 , **lowerCamelCase__ , ) -> Optional[int]: '''simple docstring''' super().__init__(**__a ) __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = width_coefficient __lowerCamelCase = depth_coefficient __lowerCamelCase = depth_divisor __lowerCamelCase = kernel_sizes __lowerCamelCase = in_channels __lowerCamelCase = out_channels __lowerCamelCase = depthwise_padding __lowerCamelCase = strides __lowerCamelCase = num_block_repeats __lowerCamelCase = expand_ratios __lowerCamelCase = squeeze_expansion_ratio __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dim __lowerCamelCase = pooling_type __lowerCamelCase = initializer_range __lowerCamelCase = batch_norm_eps __lowerCamelCase = batch_norm_momentum __lowerCamelCase = dropout_rate __lowerCamelCase = drop_connect_rate __lowerCamelCase = sum(__a ) * 4 class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = version.parse('''1.11''' ) @property def lowercase_ ( self ) -> Any: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowercase_ ( self ) -> Any: '''simple docstring''' return 1e-5
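# A hedged sketch of building the config and a randomly initialised model from it:
from transformers import EfficientNetConfig, EfficientNetModel

config = EfficientNetConfig(image_size=600, width_coefficient=2.0, depth_coefficient=3.1)
model = EfficientNetModel(config)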
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _snake_case = logging.get_logger(__name__) _snake_case = { "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCAmelCase_ ( a , a): lowerCamelCase__ = 'swin' lowerCamelCase__ = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=[2, 2, 6, 2], __a=[3, 6, 12, 24], __a=7, __a=4.0, __a=True, __a=0.0, __a=0.0, __a=0.1, __a="gelu", __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ): '''simple docstring''' super().__init__(**__a) _lowerCAmelCase : Any = image_size _lowerCAmelCase : Union[str, Any] = patch_size _lowerCAmelCase : Tuple = num_channels _lowerCAmelCase : List[Any] = embed_dim _lowerCAmelCase : Tuple = depths _lowerCAmelCase : Optional[Any] = len(__a) _lowerCAmelCase : int = num_heads _lowerCAmelCase : int = window_size _lowerCAmelCase : int = mlp_ratio _lowerCAmelCase : List[Any] = qkv_bias _lowerCAmelCase : str = hidden_dropout_prob _lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCAmelCase : Any = drop_path_rate _lowerCAmelCase : int = hidden_act _lowerCAmelCase : Tuple = use_absolute_embeddings _lowerCAmelCase : Optional[int] = layer_norm_eps _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : Tuple = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCAmelCase : List[str] = int(embed_dim * 2 ** (len(__a) - 1)) _lowerCAmelCase : List[Any] = ["stem"] + [f"stage{idx}" for idx in range(1, len(__a) + 1)] _lowerCAmelCase , _lowerCAmelCase : Optional[int] = get_aligned_output_features_output_indices( out_features=__a, out_indices=__a, stage_names=self.stage_names) class UpperCAmelCase_ ( a): lowerCamelCase__ = version.parse('1.11') @property def snake_case__ ( self): '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ]) @property def snake_case__ ( self): '''simple docstring''' return 1E-4
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
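# A hedged usage sketch of one exported pipeline; "shi-labs/versatile-diffusion"
# is the published checkpoint (a CUDA device is assumed for float16):
import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")
image = pipe("an astronaut riding a horse").images[0]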
import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A_ ( _lowerCamelCase , unittest.TestCase ): lowerCAmelCase__ = AudioLDMPipeline lowerCAmelCase__ = TEXT_TO_AUDIO_PARAMS lowerCAmelCase__ = TEXT_TO_AUDIO_BATCH_PARAMS lowerCAmelCase__ = frozenset( [ """num_inference_steps""", """num_waveforms_per_prompt""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) def _lowerCAmelCase (self :List[str] )-> Tuple: torch.manual_seed(0 ) __A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , ) __A = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) __A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __A = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) __A = ClapTextModelWithProjection(__a ) __A = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 ) __A = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , ) __A = SpeechTaHifiGan(__a ) __A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :List[Any] , _UpperCamelCase :int=0 )-> List[Any]: if str(__a ).startswith('''mps''' ): __A = torch.manual_seed(__a ) else: __A = torch.Generator(device=__a ).manual_seed(__a ) __A = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def _lowerCAmelCase (self :Any )-> Union[str, Any]: __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = AudioLDMPipeline(**__a ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = self.get_dummy_inputs(__a ) __A = audioldm_pipe(**__a ) __A = output.audios[0] assert audio.ndim == 1 assert len(__a ) == 256 __A = audio[:10] __A = 
np.array( [-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _lowerCAmelCase (self :int )-> Tuple: __A = self.get_dummy_components() __A = AudioLDMPipeline(**__a ) __A = audioldm_pipe.to(__a ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = self.get_dummy_inputs(__a ) __A = 3 * [inputs["prompt"]] # forward __A = audioldm_pipe(**__a ) __A = output.audios[0] __A = self.get_dummy_inputs(__a ) __A = 3 * [inputs.pop('''prompt''' )] __A = audioldm_pipe.tokenizer( __a , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , ) __A = text_inputs["input_ids"].to(__a ) __A = audioldm_pipe.text_encoder( __a , ) __A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state __A = F.normalize(__a , dim=-1 ) __A = prompt_embeds # forward __A = audioldm_pipe(**__a ) __A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _lowerCAmelCase (self :List[Any] )-> Tuple: __A = self.get_dummy_components() __A = AudioLDMPipeline(**__a ) __A = audioldm_pipe.to(__a ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = self.get_dummy_inputs(__a ) __A = 3 * ["this is a negative prompt"] __A = negative_prompt __A = 3 * [inputs["prompt"]] # forward __A = audioldm_pipe(**__a ) __A = output.audios[0] __A = self.get_dummy_inputs(__a ) __A = 3 * [inputs.pop('''prompt''' )] __A = [] for p in [prompt, negative_prompt]: __A = audioldm_pipe.tokenizer( __a , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , ) __A = text_inputs["input_ids"].to(__a ) __A = audioldm_pipe.text_encoder( __a , ) __A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state __A = F.normalize(__a , dim=-1 ) embeds.append(__a ) __A = embeds # forward __A = audioldm_pipe(**__a ) __A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _lowerCAmelCase (self :Tuple )-> str: __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = PNDMScheduler(skip_prk_steps=__a ) __A = AudioLDMPipeline(**__a ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = self.get_dummy_inputs(__a ) __A = "egg cracking" __A = audioldm_pipe(**__a , negative_prompt=__a ) __A = output.audios[0] assert audio.ndim == 1 assert len(__a ) == 256 __A = audio[:10] __A = np.array( [-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _lowerCAmelCase (self :Optional[Any] )-> Optional[int]: __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = PNDMScheduler(skip_prk_steps=__a ) __A = AudioLDMPipeline(**__a ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) __A = audioldm_pipe(__a , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts __A = 2 __A = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt __A = 2 __A 
= audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts __A = 2 __A = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _lowerCAmelCase (self :Optional[Any] )-> Optional[Any]: __A = "cpu" # ensure determinism for the device-dependent torch.Generator __A = self.get_dummy_components() __A = AudioLDMPipeline(**__a ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = audioldm_pipe.vocoder.config.sampling_rate __A = self.get_dummy_inputs(__a ) __A = audioldm_pipe(audio_length_in_s=0.0_1_6 , **__a ) __A = output.audios[0] assert audio.ndim == 1 assert len(__a ) / vocoder_sampling_rate == 0.0_1_6 __A = audioldm_pipe(audio_length_in_s=0.0_3_2 , **__a ) __A = output.audios[0] assert audio.ndim == 1 assert len(__a ) / vocoder_sampling_rate == 0.0_3_2 def _lowerCAmelCase (self :Optional[Any] )-> List[str]: __A = self.get_dummy_components() __A = AudioLDMPipeline(**__a ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = ["hey"] __A = audioldm_pipe(__a , num_inference_steps=1 ) __A = output.audios.shape assert audio_shape == (1, 256) __A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 __A = SpeechTaHifiGan(__a ).to(__a ) __A = audioldm_pipe(__a , num_inference_steps=1 ) __A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _lowerCAmelCase (self :Union[str, Any] )-> Any: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a ) def _lowerCAmelCase (self :Optional[int] )-> Union[str, Any]: self._test_inference_batch_single_identical(test_mean_pixel_difference=__a ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _lowerCAmelCase (self :Union[str, Any] )-> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a ) @slow class A_ ( unittest.TestCase ): def _lowerCAmelCase (self :Optional[int] )-> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :str , _UpperCamelCase :Optional[int]="cpu" , _UpperCamelCase :List[Any]=torch.floataa , _UpperCamelCase :Union[str, Any]=0 )-> List[str]: __A = torch.Generator(device=__a ).manual_seed(__a ) __A = np.random.RandomState(__a ).standard_normal((1, 8, 128, 16) ) __A = torch.from_numpy(__a ).to(device=__a , dtype=__a ) __A = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def _lowerCAmelCase (self :Optional[int] )-> str: __A = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = self.get_inputs(__a ) __A = 25 __A = audioldm_pipe(**__a ).audios[0] assert audio.ndim == 1 assert len(__a ) == 8_1920 __A = audio[7_7230:7_7240] __A = np.array( [-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] ) __A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _lowerCAmelCase (self :int )-> Any: __A = 
AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) __A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) __A = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __A = self.get_inputs(__a ) __A = audioldm_pipe(**__a ).audios[0] assert audio.ndim == 1 assert len(__a ) == 8_1920 __A = audio[2_7780:2_7790] __A = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] ) __A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
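# A usage sketch mirroring the slow tests above ("cvssp/audioldm" is the checkpoint
# they load; the step count and audio length here are illustrative):
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.0).audios[0]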
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version _snake_case = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if got_ver is None or want_ver is None: raise ValueError( F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" F" reinstalling {pkg}." ) if not ops[op](version.parse(_lowerCamelCase ) , version.parse(_lowerCamelCase ) ): raise ImportError( F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def A ( _lowerCamelCase , _lowerCamelCase = None ): '''simple docstring''' _lowerCAmelCase : List[str] = F"\n{hint}" if hint is not None else "" # non-versioned check if re.match(r"^[\w_\-\d]+$" , _lowerCamelCase ): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = requirement, None, None else: _lowerCAmelCase : Optional[int] = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , _lowerCamelCase ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" F" got {requirement}" ) _lowerCAmelCase , _lowerCAmelCase : Dict = match[0] _lowerCAmelCase : Any = want_full.split("," ) # there could be multiple requirements _lowerCAmelCase : Optional[int] = {} for w in want_range: _lowerCAmelCase : Any = re.findall(r"^([\s!=<>]{1,2})(.+)" , _lowerCamelCase ) if not match: raise ValueError( "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," F" but got {requirement}" ) _lowerCAmelCase , _lowerCAmelCase : Tuple = match[0] _lowerCAmelCase : Union[str, Any] = want_ver if op not in ops: raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" ) # special case if pkg == "python": _lowerCAmelCase : Tuple = ".".join([str(_lowerCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return # check if any version is installed try: _lowerCAmelCase : Any = importlib.metadata.version(_lowerCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( F"The '{requirement}' distribution was not found and is required by this application. {hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[str] = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" return require_version(_lowerCamelCase , _lowerCamelCase )
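# Usage sketch, assuming the two public helpers keep their upstream names
# (require_version / require_version_core, as the internal calls suggest).
# Requirement strings follow pip's format; several ranges may be comma-separated:
require_version("tokenizers>=0.11.1,!=0.11.3,<0.13", "To fix: pip install -U tokenizers")
require_version_core("datasets>=1.8.0")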
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _lowercase (unittest.TestCase ): '''simple docstring''' def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = tf.convert_to_tensor( [ [ 8.2_220_991, # 3rd highest value; idx. 0 -0.5_620_044, 5.23_229_752, 4.0_386_393, -6.8_798_378, -0.54_785_802, -3.2_012_153, 2.92_777_176, 1.88_171_953, 7.35_341_276, # 5th highest value; idx. 9 8.43_207_833, # 2nd highest value; idx. 10 -9.85_711_836, -5.96_209_236, -1.13_039_161, -7.1_115_294, -0.8_369_633, -5.3_186_408, 7.06_427_407, 0.81_369_344, -0.82_023_817, -5.9_179_796, 0.58_813_443, -6.99_778_438, 4.71_551_189, -0.18_771_637, 7.44_020_759, # 4th highest value; idx. 25 9.38_450_987, # 1st highest value; idx. 26 2.12_662_941, -9.32_562_038, 2.35_652_522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_425_518, 4.53_139_238, -5.57_510_464, -6.28_030_699, -7.19_529_503, -4.02_122_551, 1.39_337_037, -6.06_707_057, 1.59_480_517, -9.643_119, 0.03_907_799, 0.67_231_762, -8.88_206_726, 6.27_115_922, # 4th highest value; idx. 13 2.28_520_723, 4.82_767_506, 4.30_421_368, 8.8_275_313, # 2nd highest value; idx. 17 5.44_029_958, # 5th highest value; idx. 18 -4.4_735_794, 7.38_579_536, # 3rd highest value; idx. 20 -2.91_051_663, 2.61_946_077, -2.5_674_762, -9.48_959_302, -4.02_922_645, -1.35_416_918, 9.67_702_323, # 1st highest value; idx. 
27 -5.89_478_553, 1.85_370_467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) UpperCamelCase_ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above UpperCamelCase_ = tf.convert_to_tensor( [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above UpperCamelCase_ = tf_top_k_top_p_filtering(__a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) UpperCamelCase_ = output[output != -float("inf" )] UpperCamelCase_ = tf.cast( tf.where(tf.not_equal(__a , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(__a , __a , rtol=1e-12 ) tf.debugging.assert_equal(__a , __a ) @require_tf class _lowercase (unittest.TestCase , a_ ): '''simple docstring''' if is_tf_available(): lowercase__ = { """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) UpperCamelCase_ = 2 UpperCamelCase_ = 2 class _lowercase (tf.Module ): '''simple docstring''' def __init__( self , snake_case__ ): '''simple docstring''' super(__a , self ).__init__() UpperCamelCase_ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ), tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ), ) , jit_compile=__a , ) def _lowerCamelCase ( self , snake_case__ , snake_case__ ): '''simple docstring''' UpperCamelCase_ = self.model.generate( input_ids=__a , attention_mask=__a , max_new_tokens=__a , return_dict_in_generate=__a , ) return {"sequences": outputs["sequences"]} UpperCamelCase_ = [[2, 0], [102, 103]] UpperCamelCase_ = [[1, 0], [1, 1]] UpperCamelCase_ = DummyModel(model=__a ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__a , __a , signatures={"serving_default": dummy_model.serving} ) UpperCamelCase_ = tf.saved_model.load(__a ).signatures["serving_default"] for batch_size in range(1 , len(__a ) + 1 ): UpperCamelCase_ = { "input_ids": tf.constant(dummy_input_ids[:batch_size] ), "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ), } UpperCamelCase_ = serving_func(**__a )["sequences"] UpperCamelCase_ = test_model.generate(**__a , max_new_tokens=__a ) tf.debugging.assert_equal(__a , __a ) @slow def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) UpperCamelCase_ = 1 UpperCamelCase_ = 2 class _lowercase (tf.Module ): '''simple docstring''' def __init__( self , snake_case__ ): '''simple docstring''' super(__a , self ).__init__() UpperCamelCase_ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ), tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ), ) , jit_compile=__a , ) def _lowerCamelCase ( self , snake_case__ , 
snake_case__ ): '''simple docstring''' UpperCamelCase_ = self.model.generate( input_ids=__a , attention_mask=__a , max_new_tokens=__a , return_dict_in_generate=__a , ) return {"sequences": outputs["sequences"]} UpperCamelCase_ = [[2], [102, 103]] UpperCamelCase_ = [[1], [1, 1]] UpperCamelCase_ = DummyModel(model=__a ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(__a , __a , signatures={"serving_default": dummy_model.serving} ) UpperCamelCase_ = tf.saved_model.load(__a ).signatures["serving_default"] for input_row in range(len(__a ) ): UpperCamelCase_ = { "input_ids": tf.constant([dummy_input_ids[input_row]] ), "attention_mask": tf.constant([dummy_attention_masks[input_row]] ), } UpperCamelCase_ = serving_func(**__a )["sequences"] UpperCamelCase_ = test_model.generate(**__a , max_new_tokens=__a ) tf.debugging.assert_equal(__a , __a ) @slow @require_tensorflow_text def _lowerCamelCase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=__a ) class _lowercase (tf.keras.layers.Layer ): '''simple docstring''' def __init__( self ): '''simple docstring''' super().__init__() UpperCamelCase_ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(__a , "spiece.model" ) , "rb" ).read() ) UpperCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" ) def _lowerCamelCase ( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' UpperCamelCase_ = self.tokenizer.tokenize(__a ) UpperCamelCase_ = text.pad_model_inputs( __a , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) UpperCamelCase_ = self.model.generate(input_ids=__a , attention_mask=__a ) return self.tokenizer.detokenize(__a ) UpperCamelCase_ = CompleteSentenceTransformer() UpperCamelCase_ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" ) UpperCamelCase_ = complete_model(__a ) UpperCamelCase_ = tf.keras.Model(__a , __a ) keras_model.save(__a ) def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } UpperCamelCase_ = 14 UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) UpperCamelCase_ = "Hello, my dog is cute and" UpperCamelCase_ = tokenizer(__a , return_tensors="tf" ) UpperCamelCase_ = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) UpperCamelCase_ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) UpperCamelCase_ = model.generate(**__a , eos_token_id=__a , **__a ) self.assertTrue(expectation == len(generated_tokens[0] ) ) UpperCamelCase_ = [638, 198] with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) UpperCamelCase_ = model.generate(**__a , eos_token_id=__a , **__a ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" ) UpperCamelCase_ = "Hugging Face is a technology company based in New York and Paris." 
UpperCamelCase_ = bart_tokenizer(__a , return_tensors="tf" ).input_ids UpperCamelCase_ = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" ) UpperCamelCase_ = bart_model.generate(__a ).numpy() class _lowercase (a_ ): '''simple docstring''' def _lowerCamelCase ( self , snake_case__ , snake_case__=None , **snake_case__ ): '''simple docstring''' return super().call(__a , **__a ) UpperCamelCase_ = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" ) UpperCamelCase_ = bart_model.generate(__a , foo="bar" ).numpy() self.assertTrue(np.array_equal(__a , __a ) ) class _lowercase (bart_model.model.encoder.__class__ ): '''simple docstring''' def _lowerCamelCase ( self , snake_case__ , **snake_case__ ): '''simple docstring''' return super().call(__a , **__a ) UpperCamelCase_ = FakeEncoder(bart_model.config , bart_model.model.shared ) UpperCamelCase_ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) UpperCamelCase_ = bart_model.generate(__a ).numpy() with self.assertRaises(__a ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(__a , foo="bar" )
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Cleans one section of the documentation table of content: merges duplicate
    entries, sorts the rest alphabetically by title, and pins "Overview" first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
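clean_doc_toc is easiest to see on a tiny hand-made section; the entries below are hypothetical, not taken from the real _toctree.yml:

sample = [
    {"local": "api/pipelines/ddim", "title": "DDIM"},
    {"local": "api/pipelines/overview", "title": "Overview"},
    {"local": "api/pipelines/ddim", "title": "DDIM"},  # duplicate key, same title -> merged
    {"local": "api/pipelines/audio_diffusion", "title": "Audio Diffusion"},
]
print(clean_doc_toc(sample))
# [{'local': 'api/pipelines/overview', 'title': 'Overview'},
#  {'local': 'api/pipelines/audio_diffusion', 'title': 'Audio Diffusion'},
#  {'local': 'api/pipelines/ddim', 'title': 'DDIM'}]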
36
0
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    input_str = input_str.replace(" ", "")  # Replace all the whitespace in our sentence
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
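All three implementations share the same contract; a couple of spot checks (the inputs are illustrative):

assert is_pangram()  # the default sentence is a pangram
assert is_pangram_faster("Waltz, bad nymph, for quick jigs vex.")  # punctuation is ignored
assert not is_pangram_fastest("hello world")  # missing most of the alphabet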
37
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
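A quick behaviour check for preprocess_text with the default flags; a sketch, assuming a loaded AlbertTokenizer named `tok` (the input string is illustrative):

# Whitespace is collapsed (remove_space=True), LaTeX-style quotes are
# normalized, accents are stripped (keep_accents=False), and the result
# is lowercased (do_lower_case=True).
out = tok.preprocess_text("  Héllo   ``world''  ")
assert out == 'hello "world"'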
37
1
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return

        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # swap the data payloads of the two nodes
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
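swap_nodes swaps the data payloads, which is trivial once both nodes are found but mutates values that other references may hold. A hedged sketch of the alternative design that relinks the nodes themselves (a hypothetical helper, not part of the original file):

def swap_nodes_by_relinking(ll: LinkedList, data_1: Any, data_2: Any) -> None:
    # Track each node's predecessor so head/next pointers can be rewired.
    if data_1 == data_2:
        return
    prev_1, node_1 = None, ll.head
    while node_1 is not None and node_1.data != data_1:
        prev_1, node_1 = node_1, node_1.next
    prev_2, node_2 = None, ll.head
    while node_2 is not None and node_2.data != data_2:
        prev_2, node_2 = node_2, node_2.next
    if node_1 is None or node_2 is None:
        return
    # Rewire the predecessors (or the head when a node is first).
    if prev_1 is not None:
        prev_1.next = node_2
    else:
        ll.head = node_2
    if prev_2 is not None:
        prev_2.next = node_1
    else:
        ll.head = node_1
    # Finally exchange the outgoing pointers (tuple RHS is evaluated first,
    # which also keeps adjacent-node swaps correct).
    node_1.next, node_2.next = node_2.next, node_1.next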
37
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
    "Pregnancy": 168629, "Christianity": 7675, "Explain": 106423, "Fitness": 63440, "Saving": 63163,
    "Ask": 27171, "Ass": 95985, "Joke": 163509, "Questions": 45622, "Thoughts": 49605,
    "Retail": 52342, "Feminism": 164338, "Writing": 11992, "Atheism": 192263, "Netflix": 48616,
    "Computing": 39639, "Opinion": 43213, "Alone": 44967, "Funny": 58917, "Gaming": 40358,
    "Human": 4088, "India": 1331, "Joker": 77138, "Diet": 36206, "Legal": 11859,
    "Norman": 4939, "Tip": 72689, "Weight": 52343, "Movies": 46273, "Running": 23425,
    "Science": 2090, "Horror": 37793, "Confession": 60572, "Finance": 12250, "Politics": 16360,
    "Scary": 191985, "Support": 12654, "Technologies": 32516, "Teenage": 66160, "Event": 32769,
    "Learned": 67460, "Notion": 182770, "Wikipedia": 37583, "Books": 6665, "Extract": 76050,
    "Confessions": 102701, "Conspiracy": 75932, "Links": 63674, "Narcissus": 150425,
    "Relationship": 54766, "Relationships": 134796, "Reviews": 41671, "News": 4256,
    "Translation": 26820, "multilingual": 128406,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (symbols are variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
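The "@@ " markers produced by bpe() round-trip through convert_tokens_to_string; a minimal sketch, assuming a loaded CTRLTokenizer named `tok` (the piece split in the comment is illustrative and depends on the merges file):

tokens = tok._tokenize("Diet tokenization")    # e.g. ['Diet', 'token@@', 'ization']
text = tok.convert_tokens_to_string(tokens)    # '@@ ' boundary markers removed again
assert text == "Diet tokenization"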
37
1
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
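The fairseq_offset bookkeeping above exists because the fairseq and SentencePiece vocabularies disagree on the first few ids. A small sketch of the shift, assuming a loaded XLMRobertaTokenizer named `tok` (the piece chosen is illustrative):

# "▁de" lives in the plain SentencePiece vocab, so its HF id is just the raw
# spm id shifted by fairseq_offset (1 for this model family).
spm_id = tok.sp_model.PieceToId("▁de")
hf_id = tok._convert_token_to_id("▁de")
assert hf_id == spm_id + tok.fairseq_offset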
37
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowerCAmelCase__ : str = set() # Replace all the whitespace in our sentence lowerCAmelCase__ : Tuple = input_str.replace(""" """ , """""" ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(UpperCamelCase ) == 26 def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowerCAmelCase__ : Any = [False] * 26 for char in input_str: if char.islower(): lowerCAmelCase__ : Optional[Any] = True elif char.isupper(): lowerCAmelCase__ : Any = True return all(UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" from timeit import timeit lowerCAmelCase__ : Union[str, Any] = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest""" print(timeit("""is_pangram()""" , setup=UpperCamelCase ) ) print(timeit("""is_pangram_faster()""" , setup=UpperCamelCase ) ) print(timeit("""is_pangram_fastest()""" , setup=UpperCamelCase ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
37
1
def power(base: int, exponent: int) -> float:
    """Raise `base` to `exponent` using recursion."""
    return base * power(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
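A few spot checks of the contract, including the negative-exponent handling that deliberately lives outside power():

assert power(2, 5) == 32                 # 2 * 2 * 2 * 2 * 2
assert power(5, 0) == 1                  # `if exponent else 1` base case
assert 1 / power(2, abs(-3)) == 0.125    # negative exponents are inverted by the caller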
37
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark multiple functions, with three different length int values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
37
1
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies of `text` and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[Counter, Counter]:
    """Count single characters and adjacent character pairs in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
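In standard notation, the two quantities the script prints are the first- and second-order Shannon entropies of the text, with counts normalized to probabilities:

H_1 = -\sum_{c} p(c)\,\log_2 p(c), \qquad H_2 = -\sum_{c_0 c_1} p(c_0 c_1)\,\log_2 p(c_0 c_1)

and the final line prints the difference H_2 - H_1, i.e. the extra information carried by character pairs over single characters.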
37
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# The dump had lost the assignment target of the "3" constant; restoring the
# usual TensorFlow log-level knob used by this kind of diagnostic script.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
37
1
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
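With the default conv_stride, the inputs_to_logits_ratio property multiplies out to the feature extractor's overall downsampling factor:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default from UniSpeechConfig above
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # one output frame per 320 input samples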
37
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
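A small sketch of how the export config is typically inspected (using from_model_config with the default task, so the non-multiple-choice branch above applies):

config = XLMRobertaXLConfig()
onnx_config = XLMRobertaXLOnnxConfig.from_model_config(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])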
37
1
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1],
        )
37
37
1
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCAmelCase = 16 _lowerCAmelCase = 32 def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase = 16 ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ : Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase__ : Union[str, Any] = datasets.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase__ : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase__ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase__ : Optional[Any] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase__ : Dict = 8 else: lowerCAmelCase__ : Optional[int] = None return tokenizer.pad( UpperCamelCase , padding="""longest""" , max_length=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase__ : Optional[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) lowerCAmelCase__ : List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCAmelCase = mocked_dataloaders # noqa: F811 def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , UpperCamelCase ) == "1": lowerCAmelCase__ : Tuple = 2 # Initialize accelerator lowerCAmelCase__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase__ : Dict = config["""lr"""] lowerCAmelCase__ : Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase__ : List[Any] = int(config["""seed"""] ) lowerCAmelCase__ : Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase__ : Optional[Any] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=UpperCamelCase ) def inner_training_loop(UpperCamelCase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase__ : int = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = get_dataloaders(UpperCamelCase , UpperCamelCase ) # Instantiate scheduler lowerCAmelCase__ : Dict = get_linear_schedule_with_warmup( optimizer=UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = accelerator.prepare( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Now we train the model for epoch in range(UpperCamelCase ): model.train() for step, batch in enumerate(UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowerCAmelCase__ : Any = model(**UpperCamelCase ) lowerCAmelCase__ : Tuple = outputs.loss accelerator.backward(UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase__ : str = model(**UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase__ , lowerCAmelCase__ : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=UpperCamelCase , references=UpperCamelCase , ) lowerCAmelCase__ : Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , UpperCamelCase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=UpperCamelCase , default=UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase__ : Union[str, Any] = parser.parse_args() lowerCAmelCase__ : Any = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": main()
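# The core pattern in the script above, in isolation: Accelerate's
# `find_executable_batch_size` retries the wrapped function with a halved batch
# size whenever it hits a CUDA out-of-memory error. A sketch only:
# `build_dataloaders` is a hypothetical helper standing in for the real
# dataloader construction.
from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size

accelerator = Accelerator()


@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    accelerator.free_memory()  # drop references held over from a failed attempt
    train_loader, eval_loader = build_dataloaders(batch_size)  # hypothetical helper
    # ... build the model and optimizer, accelerator.prepare(...), then train ...


training_loop()  # called with no arguments; the decorator injects batch_size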
37
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=1 ): """simple docstring""" if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=0 ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = [] for old_item in old_list: lowerCAmelCase__ : Optional[Any] = old_item.replace("""in_layers.0""" , """norm1""" ) lowerCAmelCase__ : Optional[int] = new_item.replace("""in_layers.2""" , """conv1""" ) lowerCAmelCase__ : Dict = new_item.replace("""out_layers.0""" , """norm2""" ) lowerCAmelCase__ : str = new_item.replace("""out_layers.3""" , """conv2""" ) lowerCAmelCase__ : str = new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) lowerCAmelCase__ : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" ) lowerCAmelCase__ : Union[str, Any] = shave_segments(UpperCamelCase , n_shave_prefix_segments=UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=0 ): """simple docstring""" lowerCAmelCase__ : int = [] for old_item in old_list: lowerCAmelCase__ : List[str] = old_item lowerCAmelCase__ : int = new_item.replace("""norm.weight""" , """group_norm.weight""" ) lowerCAmelCase__ : Optional[Any] = new_item.replace("""norm.bias""" , """group_norm.bias""" ) lowerCAmelCase__ : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) lowerCAmelCase__ : int = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) lowerCAmelCase__ : str = shave_segments(UpperCamelCase , n_shave_prefix_segments=UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): lowerCAmelCase__ : Any = old_checkpoint[path] lowerCAmelCase__ : int = old_tensor.shape[0] // 3 lowerCAmelCase__ : int = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) lowerCAmelCase__ : Tuple = old_tensor.shape[0] // config["""num_head_channels"""] // 3 lowerCAmelCase__ : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = old_tensor.split(channels // num_heads , dim=1 ) lowerCAmelCase__ : int = query.reshape(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = key.reshape(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = value.reshape(UpperCamelCase ) for path in paths: lowerCAmelCase__ : Any = path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here lowerCAmelCase__ : Any = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) lowerCAmelCase__ : Any = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) lowerCAmelCase__ : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: lowerCAmelCase__ : Any = new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: lowerCAmelCase__ : List[Any] = old_checkpoint[path["""old"""]][:, :, 0] else: lowerCAmelCase__ : Dict = old_checkpoint[path["""old"""]] def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : str = {} lowerCAmelCase__ : str = checkpoint["""time_embed.0.weight"""] lowerCAmelCase__ : List[Any] = checkpoint["""time_embed.0.bias"""] lowerCAmelCase__ : int = checkpoint["""time_embed.2.weight"""] lowerCAmelCase__ : List[str] = checkpoint["""time_embed.2.bias"""] lowerCAmelCase__ : str = checkpoint["""input_blocks.0.0.weight"""] lowerCAmelCase__ : Any = checkpoint["""input_blocks.0.0.bias"""] lowerCAmelCase__ : Union[str, Any] = checkpoint["""out.0.weight"""] lowerCAmelCase__ : Union[str, Any] = checkpoint["""out.0.bias"""] lowerCAmelCase__ : str = checkpoint["""out.2.weight"""] lowerCAmelCase__ : Tuple = checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only lowerCAmelCase__ : Optional[Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) lowerCAmelCase__ : Optional[Any] = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(UpperCamelCase ) } # Retrieves the keys for the middle blocks only lowerCAmelCase__ : Union[str, Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) lowerCAmelCase__ : Union[str, Any] = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(UpperCamelCase ) } # Retrieves the keys for the output blocks only lowerCAmelCase__ : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) lowerCAmelCase__ : List[Any] = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(UpperCamelCase ) } for i in range(1 , UpperCamelCase ): lowerCAmelCase__ : Dict = (i - 1) // 
(config["""num_res_blocks"""] + 1) lowerCAmelCase__ : Tuple = (i - 1) % (config["""num_res_blocks"""] + 1) lowerCAmelCase__ : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] lowerCAmelCase__ : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: lowerCAmelCase__ : Optional[int] = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] lowerCAmelCase__ : Tuple = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue lowerCAmelCase__ : Optional[Any] = renew_resnet_paths(UpperCamelCase ) lowerCAmelCase__ : Dict = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} lowerCAmelCase__ : Optional[Any] = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCamelCase ) if len(UpperCamelCase ): lowerCAmelCase__ : Optional[Any] = renew_attention_paths(UpperCamelCase ) lowerCAmelCase__ : Tuple = { """old""": f"""input_blocks.{i}.1""", """new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } lowerCAmelCase__ : List[str] = { f"""input_blocks.{i}.1.qkv.bias""": { """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { """key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", """query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", """value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCamelCase , config=UpperCamelCase , ) lowerCAmelCase__ : Dict = middle_blocks[0] lowerCAmelCase__ : Union[str, Any] = middle_blocks[1] lowerCAmelCase__ : Dict = middle_blocks[2] lowerCAmelCase__ : Any = renew_resnet_paths(UpperCamelCase ) assign_to_checkpoint(UpperCamelCase , UpperCamelCase , UpperCamelCase , config=UpperCamelCase ) lowerCAmelCase__ : Dict = renew_resnet_paths(UpperCamelCase ) assign_to_checkpoint(UpperCamelCase , UpperCamelCase , UpperCamelCase , config=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = renew_attention_paths(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = { """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , attention_paths_to_split=UpperCamelCase , config=UpperCamelCase ) for i in range(UpperCamelCase ): lowerCAmelCase__ : Tuple = i // (config["""num_res_blocks"""] + 1) lowerCAmelCase__ : List[str] = i % (config["""num_res_blocks"""] + 1) lowerCAmelCase__ : int = [shave_segments(UpperCamelCase , 2 ) for name in output_blocks[i]] lowerCAmelCase__ : Union[str, Any] = {} for layer in output_block_layers: lowerCAmelCase__ , lowerCAmelCase__ : Any = layer.split(""".""" )[0], 
shave_segments(UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCamelCase ) else: lowerCAmelCase__ : str = [layer_name] if len(UpperCamelCase ) > 1: lowerCAmelCase__ : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] lowerCAmelCase__ : Dict = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] lowerCAmelCase__ : Optional[int] = renew_resnet_paths(UpperCamelCase ) lowerCAmelCase__ : int = renew_resnet_paths(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path] , config=UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): lowerCAmelCase__ : List[Any] = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) lowerCAmelCase__ : int = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] lowerCAmelCase__ : int = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. if len(UpperCamelCase ) == 2: lowerCAmelCase__ : Tuple = [] if len(UpperCamelCase ): lowerCAmelCase__ : Dict = renew_attention_paths(UpperCamelCase ) lowerCAmelCase__ : Tuple = { """old""": f"""output_blocks.{i}.1""", """new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } lowerCAmelCase__ : Tuple = { f"""output_blocks.{i}.1.qkv.bias""": { """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { """key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", """query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", """value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( UpperCamelCase , UpperCamelCase , UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=UpperCamelCase , ) else: lowerCAmelCase__ : int = renew_resnet_paths(UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: lowerCAmelCase__ : Tuple = """.""".join(["""output_blocks""", str(UpperCamelCase ), path["""old"""]] ) lowerCAmelCase__ : List[Any] = """.""".join(["""up_blocks""", str(UpperCamelCase ), """resnets""", str(UpperCamelCase ), path["""new"""]] ) lowerCAmelCase__ : Union[str, Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: _lowerCAmelCase = json.loads(f.read()) _lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] _lowerCAmelCase = UNetaDModel(**config) 
model.load_state_dict(converted_checkpoint) try: _lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) _lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) _lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
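# The conversion above is, at its core, a set of string-rewrite rules applied to
# state-dict keys. A minimal sketch of that idea; the rule list here is a small
# illustrative subset, not the full mapping.
import torch


def rename_keys(state_dict, rules):
    # Apply each (old, new) substring rule to every key; tensors are untouched.
    renamed = {}
    for key, tensor in state_dict.items():
        for old, new in rules:
            key = key.replace(old, new)
        renamed[key] = tensor
    return renamed


rules = [("in_layers.0", "norm1"), ("in_layers.2", "conv1"), ("emb_layers.1", "time_emb_proj")]
state_dict = {"input_blocks.1.0.in_layers.0.weight": torch.zeros(4)}
print(list(rename_keys(state_dict, rules)))  # ['input_blocks.1.0.norm1.weight']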
37
1
'''simple docstring'''
from __future__ import annotations

from fractions import Fraction


def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
    """simple docstring"""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    lowerCAmelCase__ : Tuple = []
    lowerCAmelCase__ : Any = 11
    lowerCAmelCase__ : Dict = int("""1""" + """0""" * digit_len )
    for num in range(UpperCamelCase , UpperCamelCase ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(UpperCamelCase , UpperCamelCase ):
                    solutions.append(f"""{num}/{den}""" )
            den += 1
        num += 1
        lowerCAmelCase__ : str = 10
    return solutions


def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 2 ):
    """simple docstring"""
    lowerCAmelCase__ : str = 1.0
    for fraction in fraction_list(UpperCamelCase ):
        lowerCAmelCase__ : str = Fraction(UpperCamelCase )
        result *= frac.denominator / frac.numerator
    return int(UpperCamelCase )


if __name__ == "__main__":
    print(solution())
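# A compact, runnable check of the same search, using fractions.Fraction for
# the exact equality test; all names here are illustrative.
from fractions import Fraction


def is_digit_cancelling_fraction(num: int, den: int) -> bool:
    # e.g. 49/98 still equals 4/8 after "cancelling" the shared digit 9
    return (
        num != den
        and num % 10 == den // 10
        and den % 10 != 0
        and Fraction(num, den) == Fraction(num // 10, den % 10)
    )


print(
    [
        f"{n}/{d}"
        for n in range(10, 100)
        for d in range(n + 1, 100)
        if is_digit_cancelling_fraction(n, d)
    ]
)  # ['16/64', '19/95', '26/65', '49/98']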
37
'''simple docstring''' from math import sqrt def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and ( number >= 0 ), "'number' must been an int and positive" lowerCAmelCase__ : int = True # 0 and 1 are none primes. if number <= 1: lowerCAmelCase__ : Optional[Any] = False for divisor in range(2 , int(round(sqrt(UpperCamelCase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: lowerCAmelCase__ : Any = False break # precondition assert isinstance(UpperCamelCase , UpperCamelCase ), "'status' must been from type bool" return status def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N lowerCAmelCase__ : List[str] = list(range(2 , n + 1 ) ) lowerCAmelCase__ : str = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(UpperCamelCase ) ): for j in range(i + 1 , len(UpperCamelCase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): lowerCAmelCase__ : List[Any] = 0 # filters actual prime numbers. lowerCAmelCase__ : List[Any] = [x for x in begin_list if x != 0] # precondition assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n > 2), "'N' must been an int and > 2" lowerCAmelCase__ : List[str] = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(UpperCamelCase ): ans.append(UpperCamelCase ) # precondition assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and number >= 0, "'number' must been an int and >= 0" lowerCAmelCase__ : Optional[Any] = [] # this list will be returns of the function. # potential prime number factors. 
lowerCAmelCase__ : Dict = 2 lowerCAmelCase__ : Dict = number if number == 0 or number == 1: ans.append(UpperCamelCase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(UpperCamelCase ): while quotient != 1: if is_prime(UpperCamelCase ) and (quotient % factor == 0): ans.append(UpperCamelCase ) quotient /= factor else: factor += 1 else: ans.append(UpperCamelCase ) # precondition assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type list" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase__ : Optional[int] = 0 # prime factorization of 'number' lowerCAmelCase__ : List[str] = prime_factorization(UpperCamelCase ) lowerCAmelCase__ : Any = max(UpperCamelCase ) # precondition assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type int" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" lowerCAmelCase__ : List[Any] = 0 # prime factorization of 'number' lowerCAmelCase__ : List[str] = prime_factorization(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = min(UpperCamelCase ) # precondition assert isinstance(UpperCamelCase , UpperCamelCase ), "'ans' must been from type int" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ), "'number' must been an int" assert isinstance(number % 2 == 0 , UpperCamelCase ), "compare bust been from type bool" return number % 2 == 0 def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ), "'number' must been an int" assert isinstance(number % 2 != 0 , UpperCamelCase ), "compare bust been from type bool" return number % 2 != 0 def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert ( isinstance(UpperCamelCase , UpperCamelCase ) and (number > 2) and is_even(UpperCamelCase ) ), "'number' must been an int, even and > 2" lowerCAmelCase__ : Dict = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' lowerCAmelCase__ : Dict = get_prime_numbers(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = len(UpperCamelCase ) # run variable for while-loops. lowerCAmelCase__ : List[str] = 0 lowerCAmelCase__ : List[Any] = None # exit variable. for break up the loops lowerCAmelCase__ : Any = True while i < len_pn and loop: lowerCAmelCase__ : List[Any] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: lowerCAmelCase__ : Optional[Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(UpperCamelCase , UpperCamelCase ) and (len(UpperCamelCase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" assert ( isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
lowerCAmelCase__ : int = 0 while numbera != 0: lowerCAmelCase__ : Any = numbera % numbera lowerCAmelCase__ : str = numbera lowerCAmelCase__ : List[str] = rest # precondition assert isinstance(UpperCamelCase , UpperCamelCase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" assert ( isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." lowerCAmelCase__ : int = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' lowerCAmelCase__ : int = prime_factorization(UpperCamelCase ) lowerCAmelCase__ : Any = prime_factorization(UpperCamelCase ) elif numbera == 1 or numbera == 1: lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : List[str] = max(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = 0 lowerCAmelCase__ : str = 0 lowerCAmelCase__ : List[Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: lowerCAmelCase__ : int = prime_fac_a.count(UpperCamelCase ) lowerCAmelCase__ : Any = prime_fac_a.count(UpperCamelCase ) for _ in range(max(UpperCamelCase , UpperCamelCase ) ): ans *= n else: lowerCAmelCase__ : Any = prime_fac_a.count(UpperCamelCase ) for _ in range(UpperCamelCase ): ans *= n done.append(UpperCamelCase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: lowerCAmelCase__ : Optional[int] = prime_fac_a.count(UpperCamelCase ) for _ in range(UpperCamelCase ): ans *= n done.append(UpperCamelCase ) # precondition assert isinstance(UpperCamelCase , UpperCamelCase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'number' must been a positive int" lowerCAmelCase__ : Optional[Any] = 0 lowerCAmelCase__ : Tuple = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(UpperCamelCase ): ans += 1 # precondition assert isinstance(UpperCamelCase , UpperCamelCase ) and is_prime( UpperCamelCase ), "'ans' must been a prime number and from type int" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" assert ( is_prime(UpperCamelCase ) and is_prime(UpperCamelCase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" lowerCAmelCase__ : Dict = p_number_a + 1 # jump to the next number lowerCAmelCase__ : List[Any] = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(UpperCamelCase ): number += 1 while number < p_number_a: ans.append(UpperCamelCase ) number += 1 # fetch the next prime number. while not is_prime(UpperCamelCase ): number += 1 # precondition assert ( isinstance(UpperCamelCase , UpperCamelCase ) and ans[0] != p_number_a and ans[len(UpperCamelCase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! 
return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 1), "'n' must been int and >= 1" lowerCAmelCase__ : List[Any] = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(UpperCamelCase ) # precondition assert ans[0] == 1 and ans[len(UpperCamelCase ) - 1] == n, "Error in function getDivisiors(...)" return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and ( number > 1 ), "'number' must been an int and >= 1" lowerCAmelCase__ : Optional[int] = get_divisors(UpperCamelCase ) # precondition assert ( isinstance(UpperCamelCase , UpperCamelCase ) and (divisors[0] == 1) and (divisors[len(UpperCamelCase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" assert ( isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. lowerCAmelCase__ : int = gcd(abs(UpperCamelCase ) , abs(UpperCamelCase ) ) # precondition assert ( isinstance(UpperCamelCase , UpperCamelCase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0" lowerCAmelCase__ : str = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" assert isinstance(UpperCamelCase , UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0" lowerCAmelCase__ : List[Any] = 0 lowerCAmelCase__ : Any = 1 lowerCAmelCase__ : Optional[Any] = 1 # this will be return for _ in range(n - 1 ): lowerCAmelCase__ : Dict = ans ans += fiba lowerCAmelCase__ : str = tmp return ans
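# The nested-loop sieve in the file above is quadratic; the classic Sieve of
# Eratosthenes does the same job in O(n log log n). A self-contained sketch:
def sieve_of_eratosthenes(n: int) -> list[int]:
    # All primes <= n. Crossing out starts at p * p because smaller multiples
    # were already removed by smaller primes.
    if n < 2:
        return []
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n**0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]


print(sieve_of_eratosthenes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]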
37
1
'''simple docstring'''
from __future__ import annotations


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    if not nums:
        raise ValueError("""List is empty""" )
    return sum(UpperCamelCase ) / len(UpperCamelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
'''simple docstring''' from sklearn.metrics import fa_score, matthews_corrcoef import datasets from .record_evaluation import evaluate as evaluate_record _lowerCAmelCase = '''\ @article{wang2019superglue, title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems}, author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R}, journal={arXiv preprint arXiv:1905.00537}, year={2019} } ''' _lowerCAmelCase = '''\ SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after GLUE with a new set of more difficult language understanding tasks, improved resources, and a new public leaderboard. ''' _lowerCAmelCase = ''' Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset. Args: predictions: list of predictions to score. Depending on the SuperGlUE subset: - for \'record\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'prediction_text\': the predicted answer text - for \'multirc\': list of question-answer dictionaries with the following keys: - \'idx\': index of the question-answer pair as specified by the dataset - \'prediction\': the predicted answer label - otherwise: list of predicted labels references: list of reference labels. Depending on the SuperGLUE subset: - for \'record\': list of question-answers dictionaries with the following keys: - \'idx\': index of the question as specified by the dataset - \'answers\': list of possible answers - otherwise: list of reference labels Returns: depending on the SuperGLUE subset: - for \'record\': - \'exact_match\': Exact match between answer and gold answer - \'f1\': F1 score - for \'multirc\': - \'exact_match\': Exact match between answer and gold answer - \'f1_m\': Per-question macro-F1 score - \'f1_a\': Average F1 score over all answers - for \'axb\': \'matthews_correlation\': Matthew Correlation - for \'cb\': - \'accuracy\': Accuracy - \'f1\': F1 score - for all others: - \'accuracy\': Accuracy Examples: >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"] >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\') >>> predictions = [0, 1] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\') >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}] >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1\': 1.0} >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\') >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}] >>> references = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0} 
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = super_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'matthews_correlation\': 1.0} ''' def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" return float((preds == labels).mean() ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase="binary" ): """simple docstring""" lowerCAmelCase__ : Any = simple_accuracy(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : Tuple = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average=UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : List[str] = {} for id_pred, label in zip(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}""" lowerCAmelCase__ : Dict = id_pred["""prediction"""] if question_id in question_map: question_map[question_id].append((pred, label) ) else: lowerCAmelCase__ : Optional[int] = [(pred, label)] lowerCAmelCase__ , lowerCAmelCase__ : int = [], [] for question, preds_labels in question_map.items(): lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = zip(*UpperCamelCase ) lowerCAmelCase__ : List[Any] = fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average="""macro""" ) fas.append(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase ) ) ems.append(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = float(sum(UpperCamelCase ) / len(UpperCamelCase ) ) lowerCAmelCase__ : List[Any] = sum(UpperCamelCase ) / len(UpperCamelCase ) lowerCAmelCase__ : Dict = float(fa_score(y_true=UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) ) return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a} @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: if self.config_name not in [ "boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" ) return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,) def UpperCAmelCase_ ( self ) -> str: if self.config_name == "record": return { "predictions": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "prediction_text": datasets.Value("""string""" ), }, "references": { "idx": { "passage": datasets.Value("""int64""" ), "query": datasets.Value("""int64""" ), }, "answers": datasets.Sequence(datasets.Value("""string""" ) ), }, } elif self.config_name == "multirc": return { "predictions": { "idx": { "answer": datasets.Value("""int64""" ), "paragraph": datasets.Value("""int64""" ), "question": datasets.Value("""int64""" ), }, "prediction": datasets.Value("""int64""" ), }, "references": datasets.Value("""int64""" ), } 
else: return { "predictions": datasets.Value("""int64""" ), "references": datasets.Value("""int64""" ), } def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: if self.config_name == "axb": return {"matthews_correlation": matthews_corrcoef(__UpperCAmelCase ,__UpperCAmelCase )} elif self.config_name == "cb": return acc_and_fa(__UpperCAmelCase ,__UpperCAmelCase ,fa_avg="""macro""" ) elif self.config_name == "record": lowerCAmelCase__ : Optional[Any] = [ { """qas""": [ {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]} for ref in references ] } ] lowerCAmelCase__ : Union[str, Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions} return evaluate_record(__UpperCAmelCase ,__UpperCAmelCase )[0] elif self.config_name == "multirc": return evaluate_multirc(__UpperCAmelCase ,__UpperCAmelCase ) elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]: return {"accuracy": simple_accuracy(__UpperCAmelCase ,__UpperCAmelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
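# For the simple SuperGLUE configurations the metric above reduces to plain
# accuracy, F1 and Matthews correlation; a sketch with scikit-learn (the label
# arrays are made up for illustration).
import numpy as np
from sklearn.metrics import f1_score, matthews_corrcoef

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])

print(
    {
        "accuracy": float((preds == labels).mean()),  # copa/rte/wic/wsc/boolq/axg
        "f1": float(f1_score(y_true=labels, y_pred=preds)),  # "cb" passes average="macro"
        "matthews_correlation": float(matthews_corrcoef(labels, preds)),  # "axb"
    }
)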
37
1
'''simple docstring''' from jiwer import compute_measures import datasets _lowerCAmelCase = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' _lowerCAmelCase = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' _lowerCAmelCase = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. 
Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_( datasets.Metric ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ,id="""sequence""" ), """references""": datasets.Value("""string""" ,id="""sequence""" ), } ) ,codebase_urls=["""https://github.com/jitsi/jiwer/"""] ,reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] ,) def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=False ) -> Union[str, Any]: if concatenate_texts: return compute_measures(__UpperCAmelCase ,__UpperCAmelCase )["wer"] else: lowerCAmelCase__ : Tuple = 0 lowerCAmelCase__ : Dict = 0 for prediction, reference in zip(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = compute_measures(__UpperCAmelCase ,__UpperCAmelCase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
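# The iterative branch above, as a standalone sketch. It assumes a jiwer
# version that still exposes `compute_measures` (newer releases prefer
# `process_words`).
from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect = total = 0
for ref, pred in zip(references, predictions):
    measures = compute_measures(ref, pred)  # argument order: (truth, hypothesis)
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]
print(incorrect / total)  # 0.5 for these two sentence pairs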
37
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]: return None class lowerCAmelCase_: '''simple docstring''' def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: return None class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' __lowercase : Dict = [ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def UpperCAmelCase_ ( self ) -> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) @require_torch @slow def UpperCAmelCase_ ( self ) -> Any: from transformers import BertModel lowerCAmelCase__ : Optional[int] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__UpperCAmelCase ) ) vocab_file.flush() lowerCAmelCase__ : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCAmelCase__ : Tuple = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) ) model.save_pretrained(__UpperCAmelCase ) self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,__UpperCAmelCase ) @require_tf @slow def UpperCAmelCase_ ( self ) -> List[str]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Dict = self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : List[str] = quantize(Path(__UpperCAmelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def UpperCAmelCase_ ( self ) -> List[Any]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCAmelCase__ : Any = self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase ) lowerCAmelCase__ : Dict = quantize(__UpperCAmelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[Any]: try: # Compute path with TemporaryDirectory() as tempdir: lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase ) return path except 
Exception as e: self.fail(__UpperCAmelCase ) @require_torch @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: from transformers import BertModel lowerCAmelCase__ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""pt""" ) @require_tf @require_tokenizers @slow def UpperCAmelCase_ ( self ) -> Optional[int]: from transformers import TFBertModel lowerCAmelCase__ : int = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""tf""" ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple: lowerCAmelCase__ : Any = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase ) # Assert all variables are present self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} ) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCAmelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCAmelCase ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCAmelCase__ , lowerCAmelCase__ : int = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCAmelCase ) ,1 ) self.assertEqual(len(__UpperCAmelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] ,"""input_ids""" ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : Dict = 
generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
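# Outside the test harness, the export-then-quantize flow exercised above looks
# roughly like this. A sketch: it downloads the checkpoint on first run, and
# `convert` expects the output directory to be new or empty.
from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

onnx_path = Path("onnx/bert-base-cased.onnx")
convert(framework="pt", model="bert-base-cased", output=onnx_path, opset=12)

quantized_path = quantize(onnx_path)  # writes e.g. bert-base-cased-quantized.onnx
assert quantized_path.stat().st_size < onnx_path.stat().st_size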
37
1
'''simple docstring'''
from collections.abc import Callable


def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
    """simple docstring"""
    lowerCAmelCase__ : float = a
    lowerCAmelCase__ : float = b
    if function(UpperCamelCase ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(UpperCamelCase ) == 0:
        return b
    elif (
        function(UpperCamelCase ) * function(UpperCamelCase ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("""could not find root in given interval.""" )
    else:
        lowerCAmelCase__ : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(UpperCamelCase ) == 0:
                return mid
            elif function(UpperCamelCase ) * function(UpperCamelCase ) < 0:
                lowerCAmelCase__ : Optional[Any] = mid
            else:
                lowerCAmelCase__ : Union[str, Any] = mid
            lowerCAmelCase__ : Any = start + (end - start) / 2.0
        return mid


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
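# A self-contained sketch of bisection on the same test function; the root of
# x**3 - 2*x - 5 is approximately 2.0945515.
def f(x: float) -> float:
    return x**3 - 2 * x - 5


lo, hi = 2.0, 3.0  # f(2) = -1 < 0 and f(3) = 16 > 0, so the root is bracketed
while hi - lo > 1e-7:
    mid = (lo + hi) / 2
    if f(lo) * f(mid) <= 0:
        hi = mid
    else:
        lo = mid
print(f"{(lo + hi) / 2:.7f}")  # 2.0945515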
37
'''simple docstring'''
from maths.prime_factors import prime_factors


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    if not isinstance(UpperCamelCase , UpperCamelCase ):
        lowerCAmelCase__ : int = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(UpperCamelCase )
    if number < 1:
        raise ValueError("""Input must be a positive integer""" )
    return -1 if len(prime_factors(UpperCamelCase ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
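# The same Liouville lambda function without the external `maths.prime_factors`
# dependency; a minimal sketch.
def big_omega(n: int) -> int:
    # Number of prime factors of n, counted with multiplicity.
    count, factor = 0, 2
    while factor * factor <= n:
        while n % factor == 0:
            n //= factor
            count += 1
        factor += 1
    return count + (1 if n > 1 else 0)


def liouville_lambda(n: int) -> int:
    return -1 if big_omega(n) % 2 else 1


print([liouville_lambda(n) for n in range(1, 11)])  # [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]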
37
1
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax _lowerCAmelCase = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self ,**__UpperCAmelCase ) -> Tuple: super().__init__(**__UpperCAmelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> str: return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase ) def UpperCAmelCase_ ( self ,**__UpperCAmelCase ) -> str: lowerCAmelCase__ : List[Any] = {} if "candidate_labels" in kwargs: lowerCAmelCase__ : int = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: lowerCAmelCase__ : Optional[int] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase="This is a photo of {}." ) -> int: lowerCAmelCase__ : str = load_image(__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.image_processor(images=[image] ,return_tensors=self.framework ) lowerCAmelCase__ : List[Any] = candidate_labels lowerCAmelCase__ : List[str] = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels] lowerCAmelCase__ : Optional[Any] = self.tokenizer(__UpperCAmelCase ,return_tensors=self.framework ,padding=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = [text_inputs] return inputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Union[str, Any]: lowerCAmelCase__ : Tuple = model_inputs.pop("""candidate_labels""" ) lowerCAmelCase__ : Union[str, Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,__UpperCAmelCase ): lowerCAmelCase__ : int = text_inputs[0] else: # Batching case. 
lowerCAmelCase__ : Dict = text_inputs[0][0] lowerCAmelCase__ : Any = self.model(**__UpperCAmelCase ,**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Union[str, Any] = model_outputs.pop("""candidate_labels""" ) lowerCAmelCase__ : List[str] = model_outputs["""logits"""][0] if self.framework == "pt": lowerCAmelCase__ : List[str] = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCAmelCase__ : Optional[Any] = probs.tolist() if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Dict = [scores] elif self.framework == "tf": lowerCAmelCase__ : Any = stable_softmax(__UpperCAmelCase ,axis=-1 ) lowerCAmelCase__ : List[Any] = probs.numpy().tolist() else: raise ValueError(F"""Unsupported framework: {self.framework}""" ) lowerCAmelCase__ : Tuple = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(__UpperCAmelCase ,__UpperCAmelCase ) ,key=lambda __UpperCAmelCase : -x[0] ) ] return result
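# End-to-end usage of the pipeline defined above; a sketch that fetches both
# the CLIP checkpoint and the example image over the network.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
print(result[0]["label"])  # results come back sorted, best score first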
37
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} _lowerCAmelCase = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } _lowerCAmelCase = { '''AI-Sweden/gpt-sw3-126m''': 2048, '''AI-Sweden/gpt-sw3-350m''': 2048, '''AI-Sweden/gpt-sw3-1.6b''': 2048, '''AI-Sweden/gpt-sw3-6.7b''': 2048, '''AI-Sweden/gpt-sw3-20b''': 2048, } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Dict = VOCAB_FILES_NAMES __lowercase : str = PRETRAINED_VOCAB_FILES_MAP __lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Optional[int] = ['''input_ids''', '''attention_mask'''] def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None: lowerCAmelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase__ : Dict = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase__ : Tuple = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase__ : Union[str, Any] = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase__ : Dict = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase__ : Any = unk_token if pad_token is None else pad_token lowerCAmelCase__ : Dict = eos_token if bos_token is None else bos_token else: lowerCAmelCase__ : List[str] = """<pad>""" if pad_token is None else pad_token lowerCAmelCase__ : Optional[int] = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=__UpperCAmelCase ,remove_space=__UpperCAmelCase ,keep_accents=__UpperCAmelCase ,bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,) lowerCAmelCase__ : Optional[int] = do_lower_case lowerCAmelCase__ : Dict = remove_space lowerCAmelCase__ : Optional[Any] = keep_accents lowerCAmelCase__ : int = vocab_file lowerCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase__ : int = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ 
""", """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing lowerCAmelCase__ : List[str] = re.compile( F"""[{''.join(map(__UpperCAmelCase ,list(range(0 ,9 ) ) + list(range(11 ,32 ) ) + list(range(127 ,160 ) ) + [160, 173, 8203] ) )}]""" ) def __getstate__( self ) -> Any: lowerCAmelCase__ : int = self.__dict__.copy() lowerCAmelCase__ : Optional[int] = None return state def __setstate__( self ,__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[str] = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def UpperCAmelCase_ ( self ) -> int: return len(self.sp_model ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : Tuple = self.non_printing_characters_re.sub("""""" ,__UpperCAmelCase ) # Normalize whitespaces lowerCAmelCase__ : List[Any] = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase__ : List[Any] = unicodedata.normalize("""NFC""" ,__UpperCAmelCase ) return text def UpperCAmelCase_ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> List[str]: lowerCAmelCase__ : List[Any] = self.preprocess_text(__UpperCAmelCase ) return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> int: return self.sp_model.PieceToId(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.IdToPiece(__UpperCAmelCase ) @staticmethod def UpperCAmelCase_ ( __UpperCAmelCase ) -> str: return out_string def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: lowerCAmelCase__ : int = [] lowerCAmelCase__ : Optional[int] = """""" lowerCAmelCase__ : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Optional[Any] = [] else: current_sub_tokens.append(__UpperCAmelCase ) lowerCAmelCase__ : Any = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string def UpperCAmelCase_ ( self ) -> Dict[str, int]: lowerCAmelCase__ : Optional[int] = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Optional[int] = os.path.join( __UpperCAmelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase ,"""wb""" ) as fi: lowerCAmelCase__ : str = self.sp_model.serialized_model_proto() 
fi.write(__UpperCAmelCase ) return (out_vocab_file,) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(__UpperCAmelCase ,__UpperCAmelCase ): lowerCAmelCase__ : Tuple = self.preprocess_text(__UpperCAmelCase ) lowerCAmelCase__ : int = self.sp_model.encode(__UpperCAmelCase ) else: lowerCAmelCase__ : int = [self.preprocess_text(__UpperCAmelCase ) for t in text] lowerCAmelCase__ : Any = self.sp_model.encode(__UpperCAmelCase ) if return_tensors is True or return_tensors == "pt": lowerCAmelCase__ : Tuple = torch.tensor(__UpperCAmelCase ) return token_ids def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> str: return self.sp_model.decode(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> List[int]: lowerCAmelCase__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()] lowerCAmelCase__ : Any = ( F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(__UpperCAmelCase ) + F"""{self.bos_token}Bot:""" ) return self.encode(text=__UpperCAmelCase )
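A minimal usage sketch for the tokenizer above. The dump renamed the class and several methods, so GPTSw3Tokenizer, encode_fast and decode_fast are assumed restorations of the upstream names; the checkpoint is taken from the pretrained map at the top of the file.

# Hypothetical sketch, assuming the class above is restored to its upstream
# name GPTSw3Tokenizer and the AI-Sweden checkpoint is reachable.
tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer.encode_fast("Katten sitter på mattan")  # SentencePiece ids after preprocess_text
text = tokenizer.decode_fast(ids)                       # round-trip back to a string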
37
1
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Tuple = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg""" lowerCAmelCase__ : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert("""RGB""" ) return image def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") ) 
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") ) # fmt: on return rename_keys def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Tuple = dct.pop(UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = val def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowerCAmelCase__ : List[Any] = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" ) lowerCAmelCase__ : str = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict lowerCAmelCase__ : Dict = torch.cat((q_bias, torch.zeros_like(UpperCamelCase , requires_grad=UpperCamelCase ), v_bias) ) lowerCAmelCase__ : Optional[int] = qkv_bias def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ : Optional[int] = 364 if """coco""" in model_name else 224 lowerCAmelCase__ : str = InstructBlipVisionConfig(image_size=UpperCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: lowerCAmelCase__ : Tuple = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowerCAmelCase__ : Optional[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: lowerCAmelCase__ : Optional[int] = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=32001 ).to_dict() elif "vicuna-13b" in model_name: lowerCAmelCase__ : Optional[Any] = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=32001 ).to_dict() else: raise ValueError("""Model name not supported""" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 lowerCAmelCase__ : Dict = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict() lowerCAmelCase__ : List[Any] = InstructBlipConfig(vision_config=UpperCamelCase , text_config=UpperCamelCase , qformer_config=UpperCamelCase ) return config, image_size @torch.no_grad() def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=None , UpperCamelCase=False ): """simple docstring""" lowerCAmelCase__ : int = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" ) qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} ) if "t5" in model_name: lowerCAmelCase__ : Optional[int] = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) lowerCAmelCase__ : Optional[int] = LlamaTokenizerFast.from_pretrained( """huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" ) tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} ) lowerCAmelCase__ , lowerCAmelCase__ : List[str] = get_blipa_config(UpperCamelCase ) lowerCAmelCase__ : 
Union[str, Any] = InstructBlipForConditionalGeneration(UpperCamelCase ).eval() lowerCAmelCase__ : Optional[Any] = { """instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""), """instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""), """instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""), """instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""), } lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) lowerCAmelCase__ : str = """cuda:1""" if torch.cuda.is_available() else """cpu""" lowerCAmelCase__ : Tuple = """cuda:2""" if torch.cuda.is_available() else """cpu""" lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = load_model_and_preprocess( name=UpperCamelCase , model_type=UpperCamelCase , is_eval=UpperCamelCase , device=UpperCamelCase ) original_model.eval() print("""Done!""" ) # update state dict keys lowerCAmelCase__ : Union[str, Any] = original_model.state_dict() lowerCAmelCase__ : List[Any] = create_rename_keys(UpperCamelCase ) for src, dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowerCAmelCase__ : List[str] = state_dict.pop(UpperCamelCase ) if key.startswith("""Qformer.bert""" ): lowerCAmelCase__ : Optional[int] = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: lowerCAmelCase__ : Tuple = key.replace("""self""" , """attention""" ) if "llm_proj" in key: lowerCAmelCase__ : int = key.replace("""llm_proj""" , """language_projection""" ) if "t5_proj" in key: lowerCAmelCase__ : Optional[int] = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""llm_model""" ): lowerCAmelCase__ : Optional[int] = key.replace("""llm_model""" , """language_model""" ) if key.startswith("""t5""" ): lowerCAmelCase__ : Tuple = key.replace("""t5""" , """language""" ) lowerCAmelCase__ : int = val # read in qv biases read_in_q_v_bias(UpperCamelCase , UpperCamelCase ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) lowerCAmelCase__ : List[str] = load_demo_image() lowerCAmelCase__ : Optional[Any] = """What is unusual about this image?""" # create processor lowerCAmelCase__ : Dict = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=UpperCamelCase , image_std=UpperCamelCase ) lowerCAmelCase__ : List[str] = InstructBlipProcessor( image_processor=UpperCamelCase , tokenizer=UpperCamelCase , qformer_tokenizer=UpperCamelCase , ) lowerCAmelCase__ : str = processor(images=UpperCamelCase , text=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # make sure processor creates exact same pixel values lowerCAmelCase__ : Tuple = vis_processors["""eval"""](UpperCamelCase ).unsqueeze(0 ).to(UpperCamelCase ) lowerCAmelCase__ : Optional[int] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCamelCase ) original_model.to(UpperCamelCase ) hf_model.to(UpperCamelCase ) with torch.no_grad(): if "vicuna" in model_name: lowerCAmelCase__ : Union[str, Any] = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits lowerCAmelCase__ : Union[str, Any] = hf_model(**UpperCamelCase ).logits else: lowerCAmelCase__ : List[Any] = original_model( {"""image""": original_pixel_values, 
"""text_input""": [prompt], """text_output""": ["""\n"""]} ).logits lowerCAmelCase__ : Optional[Any] = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(UpperCamelCase ) lowerCAmelCase__ : Any = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) lowerCAmelCase__ : Any = hf_model(**UpperCamelCase , labels=UpperCamelCase ).logits print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape lowerCAmelCase__ : Any = 1e-4 if """vicuna""" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , UpperCamelCase , atol=UpperCamelCase ) print("""Looks ok!""" ) print("""Generating with original model...""" ) lowerCAmelCase__ : Any = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("""Generating with HF model...""" ) lowerCAmelCase__ : Optional[int] = hf_model.generate( **UpperCamelCase , do_sample=UpperCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? lowerCAmelCase__ : Any = 2 print("""Original generation:""" , UpperCamelCase ) lowerCAmelCase__ : str = processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) lowerCAmelCase__ : str = [text.strip() for text in output_text] print("""HF generation:""" , UpperCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(UpperCamelCase ) hf_model.save_pretrained(UpperCamelCase ) if push_to_hub: processor.push_to_hub(f"""Salesforce/{model_name}""" ) hf_model.push_to_hub(f"""Salesforce/{model_name}""" ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() _lowerCAmelCase = [ '''instructblip-vicuna-7b''', '''instructblip-vicuna-13b''', '''instructblip-flan-t5-xl''', '''instructblip-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''instructblip-flan-t5-xl''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) _lowerCAmelCase = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_xlm_roberta_base(self) -> None:
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self) -> None:
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        # The dog is cute and lives in the garden house
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
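Both tests above are decorated with @slow, so the standard transformers test runner skips them by default; they only execute when slow tests are explicitly enabled. The test-file path below is an assumption.

# RUN_SLOW enables @slow-decorated tests in the transformers suite, e.g.:
#   RUN_SLOW=yes python -m pytest tests/models/xlm_roberta/test_modeling_xlm_roberta.py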
37
1
'''simple docstring'''


def is_palindrome_number(num: int) -> bool:
    """Return True if ``num`` reads the same forwards and backwards.

    >>> is_palindrome_number(121)
    True
    >>> is_palindrome_number(-121)
    False
    >>> is_palindrome_number(10)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        # peel the last digit off num and append it to rev_num
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging _lowerCAmelCase = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" try: with open(UpperCamelCase , """rb""" ) as flax_state_f: lowerCAmelCase__ : Union[str, Any] = from_bytes(UpperCamelCase , flax_state_f.read() ) except UnpicklingError as e: try: with open(UpperCamelCase ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCAmelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase : x.dtype == jnp.bfloataa , UpperCamelCase ) ).values() if any(UpperCamelCase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCAmelCase__ : Dict = jax.tree_util.tree_map( lambda UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCamelCase ) lowerCAmelCase__ : Any = """""" lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase , sep=""".""" ) lowerCAmelCase__ : Optional[int] = pt_model.state_dict() # keep track of unexpected & missing keys lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : int = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase__ : Union[str, Any] = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCAmelCase__ : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase__ : Any = jnp.transpose(UpperCamelCase , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCAmelCase__ : str = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase__ : Any = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCAmelCase__ : int = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(UpperCamelCase ): lowerCAmelCase__ : List[str] = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """ f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict lowerCAmelCase__ : int = np.asarray(UpperCamelCase ) if not isinstance(UpperCamelCase , np.ndarray ) else flax_tensor lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase ) # remove from missing keys missing_keys.remove(UpperCamelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(UpperCamelCase ) pt_model.load_state_dict(UpperCamelCase ) # re-transform missing_keys to list lowerCAmelCase__ : Optional[int] = list(UpperCamelCase ) if len(UpperCamelCase ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(UpperCamelCase ) > 0: logger.warning( f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" """ use it for predictions and inference.""" ) return pt_model
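A sketch of the usual call path for the loader above. Both defs were renamed in this dump; upstream the entry point is load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path). The model class and checkpoint path here are illustrative assumptions.

# Minimal sketch, assuming the first function above keeps its upstream name:
from transformers import BertModel

pt_model = BertModel.from_pretrained("bert-base-uncased")
pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "/path/to/flax_model.msgpack")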
37
1
'''simple docstring''' class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase = "" ,__UpperCAmelCase = False ) -> None: # Mapping from the first character of the prefix of the node lowerCAmelCase__ : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word lowerCAmelCase__ : List[str] = is_leaf lowerCAmelCase__ : Optional[Any] = prefix def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> tuple[str, str, str]: lowerCAmelCase__ : Union[str, Any] = 0 for q, w in zip(self.prefix ,__UpperCAmelCase ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None: for word in words: self.insert(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None: # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: lowerCAmelCase__ : str = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: lowerCAmelCase__ : List[str] = RadixNode(prefix=__UpperCAmelCase ,is_leaf=__UpperCAmelCase ) else: lowerCAmelCase__ : List[Any] = self.nodes[word[0]] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = incoming_node.match( __UpperCAmelCase ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(__UpperCAmelCase ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: lowerCAmelCase__ : Tuple = remaining_prefix lowerCAmelCase__ : int = self.nodes[matching_string[0]] lowerCAmelCase__ : Tuple = RadixNode(__UpperCAmelCase ,__UpperCAmelCase ) lowerCAmelCase__ : str = aux_node if remaining_word == "": lowerCAmelCase__ : List[str] = True else: self.nodes[matching_string[0]].insert(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> bool: lowerCAmelCase__ : List[str] = self.nodes.get(word[0] ,__UpperCAmelCase ) if not incoming_node: return False else: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = incoming_node.match( __UpperCAmelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> bool: lowerCAmelCase__ : Optional[int] = self.nodes.get(word[0] ,__UpperCAmelCase ) if not incoming_node: return False else: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = incoming_node.match( __UpperCAmelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(__UpperCAmelCase ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: lowerCAmelCase__ : List[Any] = list(self.nodes.values() )[0] lowerCAmelCase__ : 
Union[str, Any] = merging_node.is_leaf self.prefix += merging_node.prefix lowerCAmelCase__ : Dict = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: lowerCAmelCase__ : Dict = False # If there is 1 edge, we merge it with its child else: lowerCAmelCase__ : List[Any] = list(incoming_node.nodes.values() )[0] lowerCAmelCase__ : Optional[Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix lowerCAmelCase__ : int = merging_node.nodes return True def UpperCAmelCase_ ( self ,__UpperCAmelCase = 0 ) -> None: if self.prefix != "": print("""-""" * height ,self.prefix ,""" (leaf)""" if self.is_leaf else """""" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = """banana bananas bandana band apple all beast""".split() lowerCAmelCase__ : Optional[Any] = RadixNode() root.insert_many(UpperCamelCase ) assert all(root.find(UpperCamelCase ) for word in words ) assert not root.find("""bandanas""" ) assert not root.find("""apps""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" assert test_trie() def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : List[str] = RadixNode() lowerCAmelCase__ : str = """banana bananas bandanas bandana band apple all beast""".split() root.insert_many(UpperCamelCase ) print("""Words:""" , UpperCamelCase ) print("""Tree:""" ) root.print_tree() if __name__ == "__main__": main()
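A small worked example of the node-splitting branch (Case 4 in insert): inserting "test" and then "team" forces the existing node to split on the shared prefix "te", leaving two leaf children.

# Worked example of the Case 4 split in RadixNode.insert:
root = RadixNode()
root.insert("test")
root.insert("team")   # match() yields ("te", "st", "am"), so "test" splits
root.print_tree()
# Expected shape (one dash per tree level):
# - te
# -- st  (leaf)
# -- am  (leaf)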
37
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase_: '''simple docstring''' def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str: lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Optional[int] = is_training lowerCAmelCase__ : Dict = use_auxiliary_loss lowerCAmelCase__ : Union[str, Any] = num_queries lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : List[str] = min_size lowerCAmelCase__ : int = max_size lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[Any] = mask_feature_size def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase ) lowerCAmelCase__ : Any = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5 ).float() lowerCAmelCase__ : Optional[int] = (torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long() lowerCAmelCase__ : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase_ ( self ) -> Dict: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig( decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,) def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs() lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any: lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase 
,__UpperCAmelCase=False ) -> Optional[Any]: with torch.no_grad(): lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase ) def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]: lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ) lowerCAmelCase__ : Dict = model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model( pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) ) @require_torch class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __lowercase : int = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __lowercase : Union[str, Any] = False __lowercase : Dict = False __lowercase : Tuple = False __lowercase : List[Any] = False def UpperCAmelCase_ ( self ) -> Optional[int]: lowerCAmelCase__ : str = MaskFormerModelTester(self ) lowerCAmelCase__ : List[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) 
-> Optional[int]: lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def UpperCAmelCase_ ( self ) -> List[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def UpperCAmelCase_ ( self ) -> str: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def UpperCAmelCase_ ( self ) -> Any: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase_ ( self ) -> List[str]: pass def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : str = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Dict = [*signature.parameters.keys()] lowerCAmelCase__ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,__UpperCAmelCase ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2 lowerCAmelCase__ : Any = { """pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(), } lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ) def UpperCAmelCase_ ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCAmelCase_ ( self ) -> int: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Dict = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : List[str] = model(__UpperCAmelCase 
,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase_ ( self ) -> List[str]: # only MaskFormerForInstanceSegmentation has the loss lowerCAmelCase__ : Tuple = self.all_model_classes[1] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() lowerCAmelCase__ : Union[str, Any] = True lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCAmelCase__ : List[Any] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _lowerCAmelCase = 1e-4 def _SCREAMING_SNAKE_CASE ( ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase_ ( self ) -> List[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def UpperCAmelCase_ ( self ) -> Any: lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = self.default_image_processor lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[int] = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase 
,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : List[str] = prepare_img() lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : Optional[int] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Tuple = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Union[str, Any] = torch.tensor( [ [1.65_12E00, -5.25_72E00, -3.35_19E00], [3.61_69E-02, -5.90_25E00, -2.93_13E00], [1.07_66E-04, -7.76_30E00, -5.12_63E00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> str: lowerCAmelCase__ : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase ) lowerCAmelCase__ : str = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) ) with torch.no_grad(): lowerCAmelCase__ : str = model(**__UpperCAmelCase ) # masks_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,) lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) # class_queries_logits lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCAmelCase__ : Tuple = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], 
[7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) ) def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : str = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) lowerCAmelCase__ : Dict = self.default_image_processor lowerCAmelCase__ : Union[str, Any] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,) lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): lowerCAmelCase__ : Any = model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
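Outside the test harness, the inference path that the integration tests above exercise looks roughly like the sketch below; the checkpoint name and fixture image path are taken from the tests themselves.

# Standalone sketch of the MaskFormer inference pattern used above:
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.class_queries_logits.shape)  # (1, num_queries, num_labels + 1)
print(outputs.masks_queries_logits.shape)  # (1, num_queries, H // 4, W // 4)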
37
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : List[str] = '''wav2vec2''' def __init__( self ,__UpperCAmelCase=32 ,__UpperCAmelCase=768 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase=3072 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase="group" ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) ,__UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) ,__UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) ,__UpperCAmelCase=False ,__UpperCAmelCase=128 ,__UpperCAmelCase=16 ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,__UpperCAmelCase=0.0_5 ,__UpperCAmelCase=10 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=10 ,__UpperCAmelCase=0 ,__UpperCAmelCase=320 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=100 ,__UpperCAmelCase=256 ,__UpperCAmelCase=256 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase="sum" ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=256 ,__UpperCAmelCase=(512, 512, 512, 512, 1500) ,__UpperCAmelCase=(5, 3, 3, 1, 1) ,__UpperCAmelCase=(1, 2, 3, 1, 1) ,__UpperCAmelCase=512 ,__UpperCAmelCase=0 ,__UpperCAmelCase=1 ,__UpperCAmelCase=2 ,__UpperCAmelCase=False ,__UpperCAmelCase=3 ,__UpperCAmelCase=2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Any: super().__init__(**__UpperCAmelCase ,pad_token_id=__UpperCAmelCase ,bos_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ) lowerCAmelCase__ : Tuple = hidden_size lowerCAmelCase__ : Union[str, Any] = feat_extract_norm lowerCAmelCase__ : int = feat_extract_activation lowerCAmelCase__ : Union[str, Any] = list(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = list(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = list(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = conv_bias lowerCAmelCase__ : int = num_conv_pos_embeddings lowerCAmelCase__ : List[Any] = num_conv_pos_embedding_groups lowerCAmelCase__ : Dict = len(self.conv_dim ) lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Union[str, Any] = intermediate_size lowerCAmelCase__ : List[str] = hidden_act lowerCAmelCase__ : Optional[int] = num_attention_heads lowerCAmelCase__ : Dict = hidden_dropout lowerCAmelCase__ : Dict = attention_dropout lowerCAmelCase__ : Optional[Any] = activation_dropout lowerCAmelCase__ : Union[str, Any] = feat_proj_dropout lowerCAmelCase__ : str = final_dropout lowerCAmelCase__ : Union[str, Any] = layerdrop lowerCAmelCase__ : List[Any] = layer_norm_eps lowerCAmelCase__ : Optional[Any] = initializer_range lowerCAmelCase__ : int = vocab_size lowerCAmelCase__ : Dict = do_stable_layer_norm lowerCAmelCase__ : Optional[int] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ : List[str] = apply_spec_augment lowerCAmelCase__ : Tuple = mask_time_prob lowerCAmelCase__ : Optional[Any] = mask_time_length lowerCAmelCase__ : str = mask_time_min_masks lowerCAmelCase__ : str = mask_feature_prob lowerCAmelCase__ : Union[str, Any] = mask_feature_length lowerCAmelCase__ : List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCAmelCase__ : Any = num_codevectors_per_group lowerCAmelCase__ : Any = num_codevector_groups lowerCAmelCase__ : Any = contrastive_logits_temperature lowerCAmelCase__ : str = feat_quantizer_dropout lowerCAmelCase__ : int = num_negatives lowerCAmelCase__ : Optional[int] = codevector_dim lowerCAmelCase__ : List[str] = proj_codevector_dim lowerCAmelCase__ : Optional[Any] = diversity_loss_weight # ctc loss lowerCAmelCase__ : int = ctc_loss_reduction lowerCAmelCase__ : List[str] = ctc_zero_infinity # adapter lowerCAmelCase__ : List[Any] = add_adapter lowerCAmelCase__ : Any = adapter_kernel_size lowerCAmelCase__ : List[Any] = adapter_stride lowerCAmelCase__ : List[str] = num_adapter_layers lowerCAmelCase__ : Any = output_hidden_size or hidden_size lowerCAmelCase__ : Union[str, Any] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase__ : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase__ : int = list(__UpperCAmelCase ) lowerCAmelCase__ : str = list(__UpperCAmelCase ) lowerCAmelCase__ : str = list(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = xvector_output_dim @property def UpperCAmelCase_ ( self ) -> Any: return functools.reduce(operator.mul ,self.conv_stride ,1 )
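The property closing the class multiplies the convolutional strides together, which is the feature extractor's overall downsampling factor; with the default conv_stride this means one output frame per 320 audio samples. A self-contained check:

# The default strides (5, 2, 2, 2, 2, 2, 2) compose to 5 * 2**6 = 320:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320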
37
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = '''focalnet''' def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : int = patch_size lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : List[str] = use_conv_embed lowerCAmelCase__ : List[Any] = hidden_sizes lowerCAmelCase__ : Dict = depths lowerCAmelCase__ : List[str] = focal_levels lowerCAmelCase__ : List[str] = focal_windows lowerCAmelCase__ : Dict = hidden_act lowerCAmelCase__ : Dict = mlp_ratio lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Tuple = drop_path_rate lowerCAmelCase__ : Dict = use_layerscale lowerCAmelCase__ : Optional[Any] = layerscale_value lowerCAmelCase__ : str = use_post_layernorm lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation lowerCAmelCase__ : int = normalize_modulator lowerCAmelCase__ : Optional[Any] = initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : List[Any] = encoder_stride lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
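Because the config mixes in BackboneConfigMixin, it can describe which stages a downstream head should read. The sketch assumes the class is restored to its upstream name FocalNetConfig and that the mixin exposes the aligned attributes computed in __init__.

# Hypothetical sketch of the backbone-selection behaviour wired up in __init__:
config = FocalNetConfig(out_features=["stage1", "stage2"])
print(config.out_features)  # ['stage1', 'stage2']
print(config.out_indices)   # indices aligned to stage_names by the helper above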
37
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_lowerCAmelCase = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ['''LayoutXLMTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ['''LayoutXLMTokenizerFast''']

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    _lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
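The _LazyModule registered above defers the heavy tokenizer imports until an attribute is first accessed. A minimal sketch of that pattern under stated assumptions (standard library only; this is illustrative, not the actual _LazyModule implementation):

# Illustrative lazy-module sketch: attribute access triggers the real import.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # {submodule: [exported names]}

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                # Import the submodule only now, on first use.
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)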
37
'''simple docstring'''

import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
    ('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
    ('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
    ('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
    ('''input_blocks.0.0.weight''', '''conv_in.weight'''),
    ('''input_blocks.0.0.bias''', '''conv_in.bias'''),
    ('''out.0.weight''', '''conv_norm_out.weight'''),
    ('''out.0.bias''', '''conv_norm_out.bias'''),
    ('''out.2.weight''', '''conv_out.weight'''),
    ('''out.2.bias''', '''conv_out.bias'''),
]

_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''in_layers.0''', '''norm1'''),
    ('''in_layers.2''', '''conv1'''),
    ('''out_layers.0''', '''norm2'''),
    ('''out_layers.3''', '''conv2'''),
    ('''emb_layers.1''', '''time_emb_proj'''),
    ('''skip_connection''', '''conv_shortcut'''),
]

_lowerCAmelCase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        _lowerCAmelCase = F"""down_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            _lowerCAmelCase = F"""down_blocks.{i}.attentions.{j}."""
            _lowerCAmelCase = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        _lowerCAmelCase = F"""up_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            _lowerCAmelCase = F"""up_blocks.{i}.attentions.{j}."""
            _lowerCAmelCase = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0.conv."""
        _lowerCAmelCase = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0."""
        _lowerCAmelCase = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

_lowerCAmelCase = '''mid_block.attentions.0.'''
_lowerCAmelCase = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    _lowerCAmelCase = F"""mid_block.resnets.{j}."""
    _lowerCAmelCase = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    # buffer the key mapping, rewrite prefixes, then re-key the state dict
    lowerCAmelCase__ : Any = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        lowerCAmelCase__ : Optional[int] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                lowerCAmelCase__ : Any = v.replace(UpperCamelCase , UpperCamelCase )
            lowerCAmelCase__ : List[Any] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            lowerCAmelCase__ : List[Any] = v.replace(UpperCamelCase , UpperCamelCase )
        lowerCAmelCase__ : Optional[Any] = v
    lowerCAmelCase__ : Tuple = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''nin_shortcut''', '''conv_shortcut'''),
    ('''norm_out''', '''conv_norm_out'''),
    ('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        _lowerCAmelCase = F"""encoder.down_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        _lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0."""
        _lowerCAmelCase = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        _lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0."""
        _lowerCAmelCase = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        _lowerCAmelCase = F"""decoder.up_blocks.{i}.resnets.{j}."""
        _lowerCAmelCase = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    _lowerCAmelCase = F"""mid_block.resnets.{i}."""
    _lowerCAmelCase = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))


_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''norm.''', '''group_norm.'''),
    ('''q.''', '''query.'''),
    ('''k.''', '''key.'''),
    ('''v.''', '''value.'''),
    ('''proj_out.''', '''proj_attn.'''),
]


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    # reshape HF linear attention weights to SD's conv2d layout
    return w.reshape(*w.shape , 1 , 1 )


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    lowerCAmelCase__ : Optional[int] = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            lowerCAmelCase__ : str = v.replace(UpperCamelCase , UpperCamelCase )
        lowerCAmelCase__ : Optional[Any] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                lowerCAmelCase__ : Dict = v.replace(UpperCamelCase , UpperCamelCase )
            lowerCAmelCase__ : List[Any] = v
    lowerCAmelCase__ : Union[str, Any] = {v: vae_state_dict[k] for k, v in mapping.items()}
    lowerCAmelCase__ : Tuple = ["""q""", """k""", """v""", """proj_out"""]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""" )
                lowerCAmelCase__ : Optional[int] = reshape_weight_for_sd(UpperCamelCase )
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

_lowerCAmelCase = [
    # (stable-diffusion, HF Diffusers)
    ('''resblocks.''', '''text_model.encoder.layers.'''),
    ('''ln_1''', '''layer_norm1'''),
    ('''ln_2''', '''layer_norm2'''),
    ('''.c_fc.''', '''.fc1.'''),
    ('''.c_proj.''', '''.fc2.'''),
    ('''.attn''', '''.self_attn'''),
    ('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
    ('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
    ('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
_lowerCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
_lowerCAmelCase = re.compile('''|'''.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_lowerCAmelCase = {'''q''': 0, '''k''': 1, '''v''': 2}


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    lowerCAmelCase__ : Optional[Any] = {}
    lowerCAmelCase__ : int = {}
    lowerCAmelCase__ : List[Any] = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(""".self_attn.q_proj.weight""" )
            or k.endswith(""".self_attn.k_proj.weight""" )
            or k.endswith(""".self_attn.v_proj.weight""" )
        ):
            lowerCAmelCase__ : Optional[int] = k[: -len(""".q_proj.weight""" )]
            lowerCAmelCase__ : Tuple = k[-len("""q_proj.weight""" )]
            if k_pre not in capture_qkv_weight:
                lowerCAmelCase__ : List[Any] = [None, None, None]
            lowerCAmelCase__ : Dict = v
            continue

        if (
            k.endswith(""".self_attn.q_proj.bias""" )
            or k.endswith(""".self_attn.k_proj.bias""" )
            or k.endswith(""".self_attn.v_proj.bias""" )
        ):
            lowerCAmelCase__ : str = k[: -len(""".q_proj.bias""" )]
            lowerCAmelCase__ : List[str] = k[-len("""q_proj.bias""" )]
            if k_pre not in capture_qkv_bias:
                lowerCAmelCase__ : Union[str, Any] = [None, None, None]
            lowerCAmelCase__ : Any = v
            continue

        lowerCAmelCase__ : Dict = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase )
        lowerCAmelCase__ : Union[str, Any] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        lowerCAmelCase__ : Any = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase )
        lowerCAmelCase__ : Tuple = torch.cat(UpperCamelCase )

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        lowerCAmelCase__ : str = textenc_pattern.sub(lambda UpperCamelCase : protected[re.escape(m.group(0 ) )] , UpperCamelCase )
        lowerCAmelCase__ : List[Any] = torch.cat(UpperCamelCase )

    return new_state_dict


def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
    """simple docstring"""
    return text_enc_dict


if __name__ == "__main__":
    _lowerCAmelCase = argparse.ArgumentParser()

    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights using safetensors; the default is ckpt.'''
    )

    _lowerCAmelCase = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')

    # Load models from safetensors if they exist; otherwise fall back to the PyTorch .bin files
    if osp.exists(unet_path):
        _lowerCAmelCase = load_file(unet_path, device='''cpu''')
    else:
        _lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        _lowerCAmelCase = torch.load(unet_path, map_location='''cpu''')

    if osp.exists(vae_path):
        _lowerCAmelCase = load_file(vae_path, device='''cpu''')
    else:
        _lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        _lowerCAmelCase = torch.load(vae_path, map_location='''cpu''')

    if osp.exists(text_enc_path):
        _lowerCAmelCase = load_file(text_enc_path, device='''cpu''')
    else:
        _lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        _lowerCAmelCase = torch.load(text_enc_path, map_location='''cpu''')

    # Convert the UNet model
    _lowerCAmelCase = convert_unet_state_dict(unet_state_dict)
    _lowerCAmelCase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    _lowerCAmelCase = convert_vae_state_dict(vae_state_dict)
    _lowerCAmelCase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    _lowerCAmelCase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        _lowerCAmelCase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        _lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
        _lowerCAmelCase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        _lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict)
        _lowerCAmelCase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    _lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        _lowerCAmelCase = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        _lowerCAmelCase = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
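All three converters above follow the same recipe: build an identity key mapping, rewrite prefixes via the (stable-diffusion, HF Diffusers) tables, then re-key the state dict. A toy sketch of that recipe on a one-entry state dict (names below are illustrative, not part of the script):

# Illustrative only: prefix remapping on a tiny fake state dict.
import torch

toy_hf_state_dict = {"time_embedding.linear_1.weight": torch.zeros(4, 4)}
toy_map = [("time_embed.0.weight", "time_embedding.linear_1.weight")]  # (sd, hf)

mapping = {k: k for k in toy_hf_state_dict}
for sd_name, hf_name in toy_map:
    if hf_name in mapping:
        mapping[hf_name] = sd_name  # HF key now points at its SD name

converted = {v: toy_hf_state_dict[k] for k, v in mapping.items()}
print(list(converted))  # ['time_embed.0.weight']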
37
1
'''simple docstring'''

from datetime import datetime as dt
import os

from github import Github


_lowerCAmelCase = [
    '''good first issue''',
    '''good second issue''',
    '''good difficult issue''',
    '''feature request''',
    '''new model''',
    '''wip''',
]


def _SCREAMING_SNAKE_CASE ( ):
    """simple docstring"""
    lowerCAmelCase__ : str = Github(os.environ["""GITHUB_TOKEN"""] )
    lowerCAmelCase__ : Optional[Any] = g.get_repo("""huggingface/transformers""" )
    lowerCAmelCase__ : List[str] = repo.get_issues(state="""open""" )

    for issue in open_issues:
        lowerCAmelCase__ : str = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCamelCase : i.created_at , reverse=UpperCamelCase )
        lowerCAmelCase__ : Optional[Any] = comments[0] if len(UpperCamelCase ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="""closed""" )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored."""
            )


if __name__ == "__main__":
    main()
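The close/comment decisions in this script reduce to day-delta arithmetic on the issue timestamps. An illustrative sketch of the stale-comment branch's condition (dates are synthetic, chosen to trip the check):

# Illustrative only: the staleness checks boil down to day deltas like this.
from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=24)  # last activity 24 days ago
created_at = datetime.utcnow() - timedelta(days=90)  # issue opened 90 days ago
needs_stale_comment = (
    (datetime.utcnow() - updated_at).days > 23 and (datetime.utcnow() - created_at).days >= 30
)
print(needs_stale_comment)  # True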
37
'''simple docstring'''

import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git
import datasets


_lowerCAmelCase = datasets.logging.get_logger(__name__)

_lowerCAmelCase = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

_lowerCAmelCase = '''\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning, starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''

_lowerCAmelCase = '''
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    \'scores\': List of scores.

Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

_lowerCAmelCase = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
    '''simple docstring'''

    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/google-research/bleurt""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" ,id="""sequence""" ),
                    """references""": datasets.Value("""string""" ,id="""sequence""" ),
                } ) ,codebase_urls=["""https://github.com/google-research/bleurt"""] ,reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] ,)

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
            lowerCAmelCase__ : str = """bleurt-base-128"""

        if self.config_name.lower() in CHECKPOINT_URLS:
            lowerCAmelCase__ : Union[str, Any] = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            lowerCAmelCase__ : List[Any] = self.config_name.upper()
        else:
            raise KeyError(
                F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" )

        # download the model checkpoint specified by self.config_name and set up the scorer
        lowerCAmelCase__ : int = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        lowerCAmelCase__ : Dict = score.BleurtScorer(os.path.join(__UpperCAmelCase ,__UpperCAmelCase ) )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
        lowerCAmelCase__ : Union[str, Any] = self.scorer.score(references=__UpperCAmelCase ,candidates=__UpperCAmelCase )
        return {"scores": scores}
37
1
'''simple docstring'''

import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class lowerCAmelCase_( unittest.TestCase ):
    '''simple docstring'''

    def UpperCAmelCase_ ( self ) -> int:
        lowerCAmelCase__ : List[Any] = inspect.getfile(accelerate.test_utils )
        lowerCAmelCase__ : int = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        lowerCAmelCase__ : Union[str, Any] = test_metrics

    @require_cpu
    def UpperCAmelCase_ ( self ) -> Dict:
        debug_launcher(self.test_metrics.main ,num_processes=1 )

    @require_cpu
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        debug_launcher(self.test_metrics.main )

    @require_single_gpu
    def UpperCAmelCase_ ( self ) -> Any:
        self.test_metrics.main()

    @require_multi_gpu
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        lowerCAmelCase__ : Any = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(__UpperCAmelCase ,env=os.environ.copy() )
37
'''simple docstring'''

import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


_lowerCAmelCase = logging.get_logger(__name__)


class lowerCAmelCase_:
    '''simple docstring'''

    def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> str:
        if not conversation_id:
            lowerCAmelCase__ : List[str] = uuid.uuida()
        if past_user_inputs is None:
            lowerCAmelCase__ : List[Any] = []
        if generated_responses is None:
            lowerCAmelCase__ : str = []

        lowerCAmelCase__ : uuid.UUID = conversation_id
        lowerCAmelCase__ : List[str] = past_user_inputs
        lowerCAmelCase__ : List[str] = generated_responses
        lowerCAmelCase__ : Optional[str] = text

    def __eq__( self ,__UpperCAmelCase ) -> Dict:
        if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Optional[Any]:
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""" )
                lowerCAmelCase__ : Optional[int] = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            lowerCAmelCase__ : Optional[Any] = text

    def UpperCAmelCase_ ( self ) -> List[Any]:
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
            lowerCAmelCase__ : Union[str, Any] = None

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
        self.generated_responses.append(__UpperCAmelCase )

    def UpperCAmelCase_ ( self ) -> List[str]:
        for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self ) -> Tuple:
        lowerCAmelCase__ : Tuple = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            lowerCAmelCase__ : Any = """user""" if is_user else """bot"""
            output += F"""{name} >> {text} \n"""
        return output


@add_end_docstrings(
    SCREAMING_SNAKE_CASE_ ,R'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''' ,)
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
    '''simple docstring'''

    def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
        super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
        if self.tokenizer.pad_token_id is None:
            lowerCAmelCase__ : Tuple = self.tokenizer.eos_token

    def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[int]:
        lowerCAmelCase__ : List[Any] = {}
        lowerCAmelCase__ : Optional[int] = {}
        lowerCAmelCase__ : List[str] = {}

        if min_length_for_response is not None:
            lowerCAmelCase__ : Any = min_length_for_response
        if minimum_tokens is not None:
            lowerCAmelCase__ : Optional[int] = minimum_tokens
        if "max_length" in generate_kwargs:
            lowerCAmelCase__ : Optional[Any] = generate_kwargs["""max_length"""]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            lowerCAmelCase__ : int = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(__UpperCAmelCase )

        return preprocess_params, forward_params, postprocess_params

    def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ,**__UpperCAmelCase ) -> List[str]:
        lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase ,num_workers=__UpperCAmelCase ,**__UpperCAmelCase )
        if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
            return outputs[0]
        return outputs

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=32 ) -> Dict[str, Any]:
        if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
            raise ValueError("""ConversationalPipeline expects a Conversation as input""" )
        if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                """Add user inputs with the conversation's `add_user_input` method""" )
        if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
            lowerCAmelCase__ : str = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            lowerCAmelCase__ : List[Any] = self._legacy_parse_and_tokenize(__UpperCAmelCase )

        if self.framework == "pt":
            lowerCAmelCase__ : List[Any] = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            lowerCAmelCase__ : Dict = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=10 ,**__UpperCAmelCase ) -> Dict:
        lowerCAmelCase__ : Optional[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length )

        lowerCAmelCase__ : Optional[Any] = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            lowerCAmelCase__ : str = max_length - minimum_tokens
            lowerCAmelCase__ : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                lowerCAmelCase__ : Tuple = model_inputs["""attention_mask"""][:, -trim:]

        lowerCAmelCase__ : str = model_inputs.pop("""conversation""" )
        lowerCAmelCase__ : Union[str, Any] = max_length
        lowerCAmelCase__ : Any = self.model.generate(**__UpperCAmelCase ,**__UpperCAmelCase )
        if self.model.config.is_encoder_decoder:
            lowerCAmelCase__ : int = 1
        else:
            lowerCAmelCase__ : int = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ) -> List[str]:
        lowerCAmelCase__ : Optional[int] = model_outputs["""output_ids"""]
        lowerCAmelCase__ : Tuple = self.tokenizer.decode(
            output_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
        lowerCAmelCase__ : Union[str, Any] = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(__UpperCAmelCase )
        return conversation

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
        lowerCAmelCase__ : Dict = self.tokenizer.eos_token_id
        lowerCAmelCase__ : int = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) )
        if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
            lowerCAmelCase__ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
37
1